
Commit 5ce718b

Update License Plate Recognition System
1 parent 6f473b9 commit 5ce718b

7 files changed: +1392 −1 lines changed


License Plate Recognition System/LP.cpp

Lines changed: 800 additions & 0 deletions
Large diffs are not rendered by default.
License Plate Recognition System/README.md

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
# License Plate Number Recognition Algorithm

- LP.cpp uses OpenCV to detect, validate, and crop the license-plate region, and calls the Python scripts to predict the characters (see the sketch after this list)
- train-license-digits.py trains the recognition model for English letters and digits
- train-license-province.py trains the recognition model for Chinese (province) characters
- estimate_chinese.py uses the Chinese-character model to predict the province character
- estimate_character.py uses the letter-and-digit model to predict the English letters and digits
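
The LP.cpp diff above is not rendered, so the hand-off from the C++ detector to the Python predictors is not visible in this commit. Below is a minimal, hypothetical sketch (not the actual LP.cpp code) of how a C++ program could invoke the two prediction scripts after segmenting the plate; the script names come from this README, while the use of std::system and the python3 command are assumptions.

// Hypothetical sketch only: the real LP.cpp (not rendered above) may pass
// data to the Python scripts differently.
#include <cstdlib>
#include <iostream>

int main() {
    // Predict the province (Chinese) character of the segmented plate.
    int province_rc = std::system("python3 estimate_chinese.py");
    // Predict the remaining letter and digit characters.
    int character_rc = std::system("python3 estimate_character.py");
    if (province_rc != 0 || character_rc != 0) {
        std::cerr << "character prediction script failed" << std::endl;
        return 1;
    }
    return 0;
}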

estimate_character.py

Lines changed: 103 additions & 0 deletions
@@ -0,0 +1,103 @@
import sys
import os
import time
import random
import numpy as np
import tensorflow as tf
import cv2
from PIL import Image, ImageFont, ImageDraw
import io
import pygame

INPUT_SIZE = 784
WIDTH = 28
HEIGHT = 28
NUM_CLASSES = 34

SAVER_DIR = "python/train-saver/digits/"

LETTERS_DIGITS = ("0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z")
license_num = ""
time_begin = time.time()

# Define the input nodes: the image pixel matrices and the image labels (the characters they represent)
x = tf.placeholder(tf.float32, shape=[None, INPUT_SIZE])
y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])
x_image = tf.reshape(x, [-1, WIDTH, HEIGHT, 1])

# Convolution + max-pooling layer helper
def conv_layer(inputs, W, b, conv_strides, kernel_size, pool_strides, padding):
    L1_conv = tf.nn.conv2d(inputs, W, strides=conv_strides, padding=padding)
    L1_relu = tf.nn.relu(L1_conv + b)
    return tf.nn.max_pool(L1_relu, ksize=kernel_size, strides=pool_strides, padding='SAME')

# Fully connected layer helper
def full_connect(inputs, W, b):
    return tf.nn.relu(tf.matmul(inputs, W) + b)

pygame.init()

if __name__ == '__main__':
    start = time.time()
    saver = tf.train.import_meta_graph("%smodel.ckpt.meta" % (SAVER_DIR))
    with tf.Session() as sess:
        model_file = tf.train.latest_checkpoint(SAVER_DIR)
        saver.restore(sess, model_file)

        # First convolutional layer
        W_conv1 = sess.graph.get_tensor_by_name("W_conv1:0")
        b_conv1 = sess.graph.get_tensor_by_name("b_conv1:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Second convolutional layer
        W_conv2 = sess.graph.get_tensor_by_name("W_conv2:0")
        b_conv2 = sess.graph.get_tensor_by_name("b_conv2:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Fully connected layer
        W_fc1 = sess.graph.get_tensor_by_name("W_fc1:0")
        b_fc1 = sess.graph.get_tensor_by_name("b_fc1:0")
        h_pool2_flat = tf.reshape(L2_pool, [-1, 7 * 7 * 64])
        h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
        # Dropout
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        # Readout layer
        W_fc2 = sess.graph.get_tensor_by_name("W_fc2:0")
        b_fc2 = sess.graph.get_tensor_by_name("b_fc2:0")
        # Softmax output of the restored network
        conv_result = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        # Predict each segmented character image (2.jpg .. 7.jpg) in turn
        for n in range(2, 8):
            path = "python/zxytest_images/%s.jpg" % (n)
            nowimage = Image.open(path)
            nowimage = nowimage.convert('1')
            width = nowimage.size[0]
            height = nowimage.size[1]
            img_input = [[0] * INPUT_SIZE for i in range(1)]
            # Binarize: dark pixels become 1, background becomes 0
            for h in range(0, height):
                for w in range(0, width):
                    if nowimage.getpixel((w, h)) < 190:
                        img_input[0][w + h * width] = 1
                    else:
                        img_input[0][w + h * width] = 0
            result = sess.run(conv_result, feed_dict={x: np.array(img_input), keep_prob: 1.0})
            # Take the class with the highest softmax probability
            maxx1 = 0
            maxx1_index = 0
            for i in range(NUM_CLASSES):
                if result[0][i] > maxx1:
                    maxx1 = result[0][i]
                    maxx1_index = i
            license_num = license_num + LETTERS_DIGITS[maxx1_index]

        # Render the recognized characters and show them in an OpenCV window
        y = license_num
        text = "其它字符识别结果为:%s" % y
        im = Image.new("RGB", (300, 300), (255, 255, 255))
        font = pygame.font.Font(os.path.join("python/fonts/", "msyh.ttc"), 34)
        rtext = font.render(text, True, (0, 0, 0), (255, 255, 255))
        pygame.image.save(rtext, "python/result/3.jpg")
        mat = cv2.imread("python/result/3.jpg")
        cv2.namedWindow("now")
        cv2.moveWindow("now", 430, 400)
        cv2.imshow("now", mat)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        writer = tf.summary.FileWriter("logs/", sess.graph)

estimate_chinese.py

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-

import sys
import os
import time
import random
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image, ImageFont, ImageDraw
import io
import pygame

SIZE = 1280
WIDTH = 32
HEIGHT = 40
NUM_CLASSES = 31
iterations = 300
pygame.init()
SAVER_DIR = "python/train-saver/province/"

PROVINCES = ("川","鄂","赣","甘","贵","桂","黑","沪","冀","津","京","吉","辽","鲁","蒙","闽","宁","青","琼","陕","苏","晋","皖","湘","新","豫","渝","粤","云","藏","浙")

# Define the input nodes: the image pixel matrices and the image labels (the characters they represent)
x = tf.placeholder(tf.float32, shape=[None, SIZE])
y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

x_image = tf.reshape(x, [-1, WIDTH, HEIGHT, 1])

# Convolution + max-pooling layer helper
def conv_layer(inputs, W, b, conv_strides, kernel_size, pool_strides, padding):
    L1_conv = tf.nn.conv2d(inputs, W, strides=conv_strides, padding=padding)
    L1_relu = tf.nn.relu(L1_conv + b)
    return tf.nn.max_pool(L1_relu, ksize=kernel_size, strides=pool_strides, padding='SAME')

# Fully connected layer helper
def full_connect(inputs, W, b):
    return tf.nn.relu(tf.matmul(inputs, W) + b)

if __name__ == '__main__':
    start = time.time()
    saver = tf.train.import_meta_graph("%smodel.ckpt.meta" % (SAVER_DIR))
    with tf.Session() as sess:
        model_file = tf.train.latest_checkpoint(SAVER_DIR)
        saver.restore(sess, model_file)
        # First convolutional layer
        W_conv1 = sess.graph.get_tensor_by_name("W_conv1:0")
        b_conv1 = sess.graph.get_tensor_by_name("b_conv1:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Second convolutional layer
        W_conv2 = sess.graph.get_tensor_by_name("W_conv2:0")
        b_conv2 = sess.graph.get_tensor_by_name("b_conv2:0")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 1, 1, 1]
        pool_strides = [1, 1, 1, 1]
        L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Fully connected layer
        W_fc1 = sess.graph.get_tensor_by_name("W_fc1:0")
        b_fc1 = sess.graph.get_tensor_by_name("b_fc1:0")
        h_pool2_flat = tf.reshape(L2_pool, [-1, 16 * 20 * 32])
        h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
        # Dropout
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        # Readout layer
        W_fc2 = sess.graph.get_tensor_by_name("W_fc2:0")
        b_fc2 = sess.graph.get_tensor_by_name("b_fc2:0")
        # Softmax output of the restored network
        conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        # Predict the province character from the first segmented image (1.jpg)
        for n in range(1, 2):
            path = "python/zxytest_images/%s.jpg" % (n)
            img = Image.open(path)
            img = img.convert('1')  # binarize, matching the letter/digit scripts
            width = img.size[0]
            height = img.size[1]
            print(width)
            print(height)
            img_data = [[0] * SIZE for i in range(1)]
            # Dark pixels become 1, background becomes 0
            for h in range(0, height):
                for w in range(0, width):
                    if img.getpixel((w, h)) < 190:
                        img_data[0][w + h * width] = 1
                    else:
                        img_data[0][w + h * width] = 0
            result = sess.run(conv, feed_dict={x: np.array(img_data), keep_prob: 1.0})
            # Take the class with the highest softmax probability
            max1 = 0
            max1_index = 0
            for j in range(NUM_CLASSES):
                if result[0][j] > max1:
                    max1 = result[0][j]
                    max1_index = j
            nProvinceIndex = max1_index

            # Render the recognized province character and show it in an OpenCV window
            text = "中文字符为:%s" % PROVINCES[nProvinceIndex]
            im = Image.new("RGB", (300, 50), (255, 255, 255))
            font = pygame.font.Font(os.path.join("python/fonts/", "msyh.ttc"), 34)
            rtext = font.render(text, True, (0, 0, 0), (255, 255, 255))
            pygame.image.save(rtext, "python/result/2.jpg")
            mat = cv2.imread("python/result/2.jpg")
            cv2.namedWindow("now")
            cv2.moveWindow("now", 200, 400)
            cv2.imshow("now", mat)
            cv2.waitKey(0)
        end = time.time()

train-license-digits.py

Lines changed: 177 additions & 0 deletions
@@ -0,0 +1,177 @@
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-

import sys
import os
import time
import random

import numpy as np
import tensorflow as tf
import cv2
from PIL import Image


SIZE = 784
WIDTH = 28
HEIGHT = 28
NUM_CLASSES = 34
iterations = 100

SAVER_DIR = "python/train-saver/digits/"

LETTERS_DIGITS = ("0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z")
license_num = ""
time_begin = time.time()

# Define the input nodes: the image pixel matrices and the image labels (the characters they represent)
x = tf.placeholder(tf.float32, shape=[None, SIZE])
y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])
x_image = tf.reshape(x, [-1, WIDTH, HEIGHT, 1])

# Convolution + max-pooling layer helper
def conv_layer(inputs, W, b, conv_strides, kernel_size, pool_strides, padding):
    L1_conv = tf.nn.conv2d(inputs, W, strides=conv_strides, padding=padding)
    L1_relu = tf.nn.relu(L1_conv + b)
    return tf.nn.max_pool(L1_relu, ksize=kernel_size, strides=pool_strides, padding='SAME')

# Fully connected layer helper
def full_connect(inputs, W, b):
    return tf.nn.relu(tf.matmul(inputs, W) + b)

if __name__ == '__main__':
    # First pass over the training directories: count the total number of images
    input_count = 0
    for i in range(0, NUM_CLASSES):
        dir = 'python/train_images/training-set/%s/' % i
        for rt, dirs, files in os.walk(dir):
            for filename in files:
                input_count += 1

    # Allocate arrays of the matching dimensions
    input_images = np.array([[0] * SIZE for i in range(input_count)])
    input_labels = np.array([[0] * NUM_CLASSES for i in range(input_count)])

    # Second pass: fill in the image data and labels
    index = 0
    for i in range(0, NUM_CLASSES):
        dir = 'python/train_images/training-set/%s/' % i
        for rt, dirs, files in os.walk(dir):
            for filename in files:
                filename = dir + filename
                img = Image.open(filename)
                img = img.convert('1')
                width = img.size[0]
                height = img.size[1]
                for h in range(0, height):
                    for w in range(0, width):
                        # This thresholding thins the character strokes, which helps recognition accuracy
                        if img.getpixel((w, h)) > 230:
                            input_images[index][w + h * width] = 0
                        else:
                            input_images[index][w + h * width] = 1
                input_labels[index][i] = 1
                index += 1

    # First pass over the validation directories: count the total number of images
    val_count = 0
    for i in range(0, NUM_CLASSES):
        dir = 'python/train_images/validation-set/%s/' % i
        for rt, dirs, files in os.walk(dir):
            for filename in files:
                val_count += 1

    # Allocate arrays of the matching dimensions
    val_images = np.array([[0] * SIZE for i in range(val_count)])
    val_labels = np.array([[0] * NUM_CLASSES for i in range(val_count)])

    # Second pass: fill in the image data and labels
    index = 0
    for i in range(0, NUM_CLASSES):
        dir = 'python/train_images/validation-set/%s/' % i
        for rt, dirs, files in os.walk(dir):
            for filename in files:
                filename = dir + filename
                img = Image.open(filename)
                img = img.convert('1')
                width = img.size[0]
                height = img.size[1]
                for h in range(0, height):
                    for w in range(0, width):
                        # This thresholding thins the character strokes, which helps recognition accuracy
                        if img.getpixel((w, h)) > 230:
                            val_images[index][w + h * width] = 0
                        else:
                            val_images[index][w + h * width] = 1
                val_labels[index][i] = 1
                index += 1

    with tf.Session() as sess:
        # First convolutional layer
        W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name="W_conv1")
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv1")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L1_pool = conv_layer(x_image, W_conv1, b_conv1, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Second convolutional layer
        W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name="W_conv2")
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]), name="b_conv2")
        conv_strides = [1, 1, 1, 1]
        kernel_size = [1, 2, 2, 1]
        pool_strides = [1, 2, 2, 1]
        L2_pool = conv_layer(L1_pool, W_conv2, b_conv2, conv_strides, kernel_size, pool_strides, padding='SAME')
        # Fully connected layer
        W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name="W_fc1")
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name="b_fc1")
        h_pool2_flat = tf.reshape(L2_pool, [-1, 7 * 7 * 64])
        h_fc1 = full_connect(h_pool2_flat, W_fc1, b_fc1)
        # Dropout
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        # Readout layer
        W_fc2 = tf.Variable(tf.truncated_normal([1024, NUM_CLASSES], stddev=0.1), name="W_fc2")
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name="b_fc2")

        # Define the loss, optimizer, and training op
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        sess.run(tf.global_variables_initializer())

        time_elapsed = time.time() - time_begin
        print("读取图片文件耗费时间:%d秒" % time_elapsed)
        time_begin = time.time()
        print("一共读取了 %s 个训练图像, %s 个标签" % (input_count, input_count))

        # Set the input size of each training op. To support an arbitrary number of images,
        # a remainder is computed: e.g. with a batch size of 60 and 150 images in total, the
        # first two batches hold 60 images each and the last batch holds the remaining 30.
        batch_size = 60
        batches_count = int(input_count / batch_size)
        remainder = input_count % batch_size
        print("训练数据集分成 %s 批, 前面每批 %s 个数据,最后一批 %s 个数据" % (batches_count + 1, batch_size, remainder))

        # Run the training iterations
        for it in range(iterations):
            # The inputs fed to each training op must be converted to np.array
            for n in range(batches_count):
                train_step.run(feed_dict={x: input_images[n * batch_size:(n + 1) * batch_size], y_: input_labels[n * batch_size:(n + 1) * batch_size], keep_prob: 0.5})
            if remainder > 0:
                start_index = batches_count * batch_size
                train_step.run(feed_dict={x: input_images[start_index:input_count], y_: input_labels[start_index:input_count], keep_prob: 0.5})

            # After each iteration, evaluate the validation accuracy and stop early once it reaches 100%
            iterate_accuracy = 0
            if it % 1 == 0:
                iterate_accuracy = accuracy.eval(feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
                print('第 %d 次训练迭代: 准确率 %0.5f%%' % (it, iterate_accuracy * 100))
                if iterate_accuracy >= 0.9999:
                    break

        print('完成训练!')
        time_elapsed = time.time() - time_begin
        print("训练耗费时间:%d秒" % time_elapsed)
        time_begin = time.time()

        # Save the trained model
        if not os.path.exists(SAVER_DIR):
            print('不存在训练数据保存目录,现在创建保存目录')
            os.makedirs(SAVER_DIR)
        # Initialize the saver
        saver = tf.train.Saver()
        saver_path = saver.save(sess, "%smodel.ckpt" % (SAVER_DIR))

0 commit comments
