赞
踩
所用图片:
代码:01.py
import cv2
import numpy as np
from matplotlib import pyplot as plt
from utilsW.utils import cvShow, drawRect


def plt_show0(img):
    """Show a BGR (OpenCV) color image with matplotlib, converting to RGB first."""
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    plt.imshow(img)
    plt.show()


def plt_show(img):
    """Show an image with matplotlib as-is (grayscale or already-RGB data)."""
    plt.imshow(img)
    plt.show()


if __name__ == '__main__':
    # 1. Load the source image.
    src = cv2.imread("carcode.jpg")
    if src is None:
        # Fail fast with a clear message instead of a cryptic OpenCV error later.
        raise FileNotFoundError("carcode.jpg could not be read")
    cvShow("7", src)
    # drawRect(src, 215, 222, 152, 50)

    # 2. Gaussian blur to suppress noise before edge detection.
    gaussImg = cv2.GaussianBlur(src, (3, 3), 0)
    cvShow("gaussImg", gaussImg)

    # 3. Convert to grayscale.
    gray = cv2.cvtColor(gaussImg, cv2.COLOR_BGR2GRAY)

    # 4. Edge detection: an x-direction Sobel emphasises the vertical strokes
    #    of plate characters; CV_16S avoids overflow, then take absolute value.
    sobel_x = cv2.Sobel(gray, cv2.CV_16S, 1, 0)
    image = cv2.convertScaleAbs(sobel_x)
    cvShow("2", image)

    # 5. Otsu automatic threshold (THRESH_BINARY was implied by the bare
    #    THRESH_OTSU flag; spelled out explicitly for clarity).
    ret, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cvShow("3", image)

    # 6. Morphology: a wide closing merges the character edges into one white blob.
    kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 5))
    print(kernelX)
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelX, iterations=3)
    cvShow("4", image)

    # Alternate dilate/erode with a long horizontal and a tall vertical kernel
    # to strip thin noise while keeping the plate-shaped region intact.
    kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))
    kernelY = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 19))
    image = cv2.dilate(image, kernelX)
    image = cv2.erode(image, kernelY)
    image = cv2.erode(image, kernelX)
    image = cv2.dilate(image, kernelY)
    cvShow("5", image)

    # Median blur removes the remaining salt-and-pepper specks.
    image = cv2.medianBlur(image, 15)
    cvShow("6", image)

    # 7. Contour detection on the binary mask
    #    (OpenCV 4.x: findContours returns (contours, hierarchy)).
    contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    new = src.copy()
    cv2.drawContours(new, contours, -1, (0, 0, 255), 2)
    cvShow("7", new)

    # Pick the contour whose bounding box lies in the known plate row band
    # (hard-coded for this photo), crop it from the original and save it
    # as the input of 02.py.
    for item in contours:
        x, y, w, h = cv2.boundingRect(item)
        if 215 <= y < 250:
            plate = src[y:y + h, x:x + w]
            cvShow("10", plate)
            cv2.imwrite("test1.png", plate)
代码2:02.py
import cv2
import cv2 as cv  # the original used both names; keep both aliases importable
import numpy as np
from utilsW.utils import cvShow

if __name__ == '__main__':
    # 1. Load the plate crop produced by 01.py.
    src = cv.imread("test1.png")
    if src is None:
        # Fail fast instead of crashing inside GaussianBlur with a cryptic error.
        raise FileNotFoundError("test1.png could not be read; run 01.py first")
    cvShow("src", src)

    # 2. Denoise with a Gaussian blur, then convert to grayscale.
    gauss = cv.GaussianBlur(src, (3, 3), 0)
    gray = cv.cvtColor(gauss, cv.COLOR_BGR2GRAY)
    cvShow("gray", gray)

    # 3. Otsu automatic threshold.
    thresholdImg = cv.threshold(gray, 0, 255, cv.THRESH_OTSU)[1]
    cvShow("thresholdImg", thresholdImg)

    # 4. Ensure characters are white on black: if white dominates, polarity is
    #    inverted, so re-threshold with THRESH_BINARY_INV.
    #    np.count_nonzero replaces the original O(h*w) pure-Python pixel loop.
    h, w = thresholdImg.shape
    print(thresholdImg.shape)
    area_white = int(np.count_nonzero(thresholdImg == 255))
    area_black = h * w - area_white
    if area_white > area_black:
        thresholdImg = cv.threshold(gray, 0, 255, cv.THRESH_OTSU | cv.THRESH_BINARY_INV)[1]
        cvShow("thresholdImg", thresholdImg)

    # 5. A small dilation joins broken character strokes.
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (2, 2))
    dilateImg = cv.dilate(thresholdImg, kernel)
    cvShow("dilateImg", dilateImg)

    # 6. Outer contour of each character, drawn on a copy for inspection.
    contours = cv.findContours(dilateImg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[0]
    srcCopy = src.copy()
    cv.drawContours(srcCopy, contours, -1, (0, 0, 255), 2)
    cvShow("drawContours", srcCopy)

    # Sort the bounding boxes left-to-right so characters come out in order.
    words = sorted((list(cv.boundingRect(c)) for c in contours), key=lambda s: s[0])
    print(words)

    # Keep boxes whose aspect ratio looks like a single character
    # (height between 1.8x and 3.5x the width) and show each crop.
    i = 0
    for word in words:
        if word[2] * 1.8 < word[3] < word[2] * 3.5:
            i = i + 1
            # original had a typo here: `iamge`
            charImg = src[word[1]:word[1] + word[3], word[0]:word[0] + word[2]]
            cvShow("12", charImg)
其中工具函数代码:utils.py
# @time: 2022/1/6 11:06
# @Author: wangshubo
# @File: utilsW.py
# @description: wrapped utility functions
# @author_email: '971490321@qq.com'
import numpy as np


def cvShow(name, img):
    """Display *img* in a window titled *name* and block until a key is pressed."""
    # cv2 is imported lazily so the pure-math helpers below remain importable
    # (and testable) even when OpenCV is not installed.
    import cv2 as cv
    cv.imshow(name, img)
    cv.waitKey(0)
    cv.destroyAllWindows()


def CalDistance(pt1, pt2):
    """Return the Euclidean distance between points pt1 = (x1, y1) and pt2 = (x2, y2)."""
    x1, y1 = pt1[0], pt1[1]
    x2, y2 = pt2[0], pt2[1]
    return np.sqrt(((y2 - y1) ** 2) + ((x2 - x1) ** 2))


def listSum(list):
    """Return the sum of the elements of *list*.

    NOTE: the parameter name shadows the builtin ``list``; kept unchanged
    for backward compatibility with any existing keyword callers.
    """
    # The builtin sum() replaces the original manual while-loop accumulator.
    return sum(list)


def drawRect(image, x, y, w, h):
    """Draw a red 2px rectangle (x, y, w, h) on *image* (must be 3-channel) and show it."""
    import cv2 as cv  # lazy import, see cvShow
    pt1, pt2 = (x, y), (x + w, y + h)
    cv.rectangle(image, pt1, pt2, (0, 0, 255), 2)
    cvShow("image", image)
其中01.py保存的图片用于02.py
总结:传统图像处理步骤:
1,导入图像src
2,滤波去噪,灰度化
3,阈值函数进行处理
4,进行形态学操作,好找轮廓
5,找到轮廓,进行轮廓排序
6,画上轮廓
7,挑选轮廓
8,找到轮廓坐标,然后定位原图像src其中的具体位置。
9,进行matchTemplate比较
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。