Pytesseract没有给出预期的输出

时间:2018-11-11 08:08:28

标签: python python-3.x opencv python-tesseract

我是Python的新手,我正在使用Haar级联制作车牌识别系统。我的代码可以很好地检测到车牌并绘制轮廓,但是pytesseract OCR无法识别字符,给出了奇怪的结果。请帮忙。

The detected plate using haar cascades

contours made on the detected region

This is the output

import cv2
import numpy as np
import pytesseract

# Haar cascade trained to detect licence plates.
plate_cascade = cv2.CascadeClassifier('C:/Users/Jai/Desktop/Test/haarcascade_plate.xml')

img = cv2.imread('C:/Users/Jai/Desktop/Test/images/NumberPlates/IMG_20181029_194221.jpg', cv2.IMREAD_COLOR)

# cvtColor already yields uint8; no extra np.array(..., dtype='uint8') cast needed.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

plates = plate_cascade.detectMultiScale(gray, 1.3, 5)

# Keep the last detected plate region (mirrors the original loop behaviour).
roiGray = None
for (x, y, w, h) in plates:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 5)
    roiGray = gray[y:y + h, x:x + w]
    roiImg = img[y:y + h, x:x + w]

# Guard: without this, a frame with no detected plate raised NameError below.
if roiGray is None:
    raise SystemExit('No licence plate detected by the cascade.')

# Invert so the characters become white on a black background for thresholding.
roiUGray = cv2.bitwise_not(roiGray)
cv2.imshow('img', img)
cv2.imshow('roi_gray', roiUGray)

ret, thresh = cv2.threshold(roiUGray, 127, 255, 0)
height, width = thresh.shape
# Blank (black) canvas the filtered character contours are drawn onto.
newImage = np.zeros((height, width, 3), np.uint8)

# findContours returns (image, contours, hierarchy) on OpenCV 3 but only
# (contours, hierarchy) on OpenCV 4; taking the last two works on both.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

# Average area over the inner (child) contours — presumably the characters.
num = 0
area = 0
for j in range(len(contours)):
    if hierarchy[0][j][3] != -1:  # contour has a parent, i.e. it is nested
        area += cv2.contourArea(contours[j])
        num += 1

# Guard against division by zero; previously avgArea could be undefined
# (NameError) when no nested contour existed.
avgArea = float(area) / float(num) if num else 0.0

for j in range(len(contours)):
    if hierarchy[0][j][3] != -1 and cv2.contourArea(contours[j]) > 0.2 * avgArea:
        # Wrap the single contour in a list: passing it bare makes
        # drawContours treat every point as its own contour.
        cv2.drawContours(newImage, [contours[j]], -1, (255, 255, 255), 1)

# NOTE(review): the original used a (1, 1) kernel, which is a no-op;
# an odd kernel >= 3 is required for any actual smoothing.
blur = cv2.GaussianBlur(newImage, (3, 3), 0)
cv2.imshow('roi', blur)

ocr_result = pytesseract.image_to_string(blur, lang='eng')
print(ocr_result)

cv2.waitKey(0)
cv2.destroyAllWindows()

1 个答案:

答案 0 :(得分:2)

此方法使用轮廓检测来找到车牌的区域,并对其进行透视变换。然后,它使用自适应阈值法检测数字,并进行中值模糊处理以消除干扰pytesseract识别的噪声。

蓝色框来自原始图像。红色框来自我的车牌识别Haar级联。绿色框是对车牌进行轮廓检测的结果。

这是透视变换的输出。我使用imutils模块来完成此操作。

这是自适应阈值和模糊的输出,我使用了skimage模块。

使用它,我得到的输出是:

  

\ fUP1ADN7120

并删除所有不是大写字母或数字的字符,则会得到:

  

UP1ADN7120

只有一个字符错误。

这种方法的效果还不错,但是您可以使用其他方法(using another method)获得更好的结果。如果这种方法对您不起作用,那么您可以创建一个CNN。

代码如下:

import cv2
import numpy as np
import pytesseract
import imutils.perspective
from skimage.filters import threshold_local

# OpenCV ships this cascade for (Russian-style) licence plates.
plate_cascade = cv2.CascadeClassifier('/usr/local/lib/python3.5/dist-packages/cv2/data/haarcascade_russian_plate_number.xml')

img = cv2.imread('GNA0d.jpg', cv2.IMREAD_COLOR)

# cvtColor already yields uint8; no extra cast needed.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

plates = plate_cascade.detectMultiScale(gray, 1.3, 5)

# Keep the last detected plate region (mirrors the original loop behaviour).
roiGray = None
for (x, y, w, h) in plates:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    roiGray = gray[y:y + h, x:x + w]
    roiImg = img[y:y + h, x:x + w]

# Guard: without this, an image with no detected plate raised NameError below.
if roiGray is None:
    raise SystemExit('No licence plate detected by the cascade.')

blur = cv2.GaussianBlur(roiGray, (5, 5), 0)
edges = cv2.Canny(blur, 75, 200)

# findContours returns (image, contours, hierarchy) on OpenCV 3 but only
# (contours, hierarchy) on OpenCV 4; [-2] picks the contour list on both.
contours = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
# Sort the contours, keeping only the biggest five to improve speed.
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]

# Find the largest contour that approximates to a quadrilateral — the plate.
screenCnt = None
for contour in contours:
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break

# Guard: screenCnt stayed undefined in the original when no quad was found.
if screenCnt is None:
    raise SystemExit('No quadrilateral plate contour found.')

orig = roiImg.copy()
cv2.drawContours(roiImg, [screenCnt], -1, (0, 255, 0), 2)

# Perspective-transform both the colour ROI (for display) and the grey ROI
# (for thresholding) to a fronto-parallel view of the plate.
warped = imutils.perspective.four_point_transform(orig, screenCnt.reshape(4, 2))
graywarp = imutils.perspective.four_point_transform(roiGray, screenCnt.reshape(4, 2))

# Adaptive (local) thresholding copes with uneven plate illumination.
T = threshold_local(graywarp, 11, offset=10, method="gaussian")
graywarp = (graywarp > T).astype("uint8") * 255

# Median blur removes the salt-and-pepper noise that confuses pytesseract.
graywarp = cv2.medianBlur(graywarp, 3)

text = pytesseract.image_to_string(graywarp)
print(text)
# Plates contain only upper-case letters and digits; drop everything else.
print("".join([c for c in text if c.isupper() or c.isdigit()]))


cv2.imshow("warped", warped)
cv2.imshow("graywarp", graywarp)
cv2.imshow('img', img)


cv2.waitKey(0)
cv2.destroyAllWindows()

我从 pyimagesearch 那里借鉴了一些内容,他比我解释得更好。