I am trying to repair broken text (shown in the image below) so that I can run OCR on it. How can I fix the text below? I have already tried dilation, erosion, morphological closing, and using the distance between contours, but none of these seem to work. Thanks for any help.
The broken text:
Attempted solution (did not work):
import cv2
import pytesseract
import numpy as np

img = cv2.imread("/Users/2020shatgiskessell/Desktop/OpenSlate/FN2.png")

def OCR(img):
    config = ('-l eng --oem 1 --psm 3')
    text = pytesseract.image_to_string(img, config=config)
    return text

def get_countour(img):
    try:
        output = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        output = output.copy()
    except Exception:
        output = img.copy()
    #imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #ret, thresh = cv2.threshold(output, 127, 255, 0)
    contours, hierarchy = cv2.findContours(output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # drop the largest contour (usually the page/background outline)
    c = max(contours, key=cv2.contourArea)
    contours = [cnt for cnt in contours if cnt is not c]
    cv2.drawContours(output, contours, -1, (0, 255, 0), -1)
    kernel = np.ones((2, 1), np.uint8)
    #eroded = cv2.erode(output, kernel, iterations=1)
    output = cv2.dilate(output, kernel, iterations=1)
    return output

def strengthen(img):
    try:
        imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except Exception:
        imgray = img
    #ret, thresh = cv2.threshold(imgray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    #blur1 = cv2.blur(imgray, (5, 5))
    blur2 = cv2.GaussianBlur(imgray, (5, 5), 0)
    thresh2 = cv2.adaptiveThreshold(blur2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
    kernel = np.ones((2, 1), np.uint8)
    #eroded = cv2.erode(thresh2, kernel, iterations=1)
    #opening = cv2.morphologyEx(eroded, cv2.MORPH_CLOSE, kernel)
    #closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    return thresh2

#MNIST(img)
strengthened = strengthen(img)
contours = get_countour(strengthened)
print("from morphology transformation: " + OCR(contours))

cv2.imshow('img', img)
cv2.imshow('contour', contours)
cv2.waitKey(0)
cv2.destroyAllWindows()
The images above are recognized as:
Image 1: (CAN siren
Image 2: > AMAR VRAIR
Image 3: still
Answer 0 (score: 4)
You could do this by training a GAN (Generative Adversarial Network) for image completion:
Image Completion with Deep Convolutional Generative Adversarial Networks
https://github.com/saikatbsk/ImageCompletion-DCGAN
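To make the image-completion idea concrete, here is a minimal, hypothetical sketch. It is not the linked repository's code; it assumes PyTorch, a 32x32 single-channel patch size, and a DCGAN-style generator that has already been trained on clean glyph patches (the usual DCGAN training loop is omitted). At completion time the latent vector z is optimized so that the generated patch matches the broken input on the pixels that are still intact, and the generator fills in the missing strokes.

import torch
import torch.nn as nn

class Generator(nn.Module):
    """DCGAN-style generator: 100-d latent vector -> 32x32 single-channel patch."""
    def __init__(self, z_dim=100):
        super().__init__()
        self.net = nn.Sequential(
            nn.ConvTranspose2d(z_dim, 128, 4, 1, 0), nn.BatchNorm2d(128), nn.ReLU(True),  # 4x4
            nn.ConvTranspose2d(128, 64, 4, 2, 1), nn.BatchNorm2d(64), nn.ReLU(True),      # 8x8
            nn.ConvTranspose2d(64, 32, 4, 2, 1), nn.BatchNorm2d(32), nn.ReLU(True),       # 16x16
            nn.ConvTranspose2d(32, 1, 4, 2, 1), nn.Tanh())                                # 32x32

    def forward(self, z):
        return self.net(z.view(z.size(0), -1, 1, 1))

def complete(generator, broken, mask, steps=500, lr=0.05):
    """Search the latent space for a patch that agrees with `broken` wherever `mask` == 1.

    broken, mask: tensors of shape (1, 1, 32, 32); mask is 1 on intact pixels, 0 on damaged ones.
    """
    generator.eval()
    z = torch.randn(1, 100, requires_grad=True)
    opt = torch.optim.Adam([z], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        loss = (((generator(z) - broken) * mask) ** 2).mean()  # penalize mismatch only on intact pixels
        loss.backward()
        opt.step()
    return generator(z).detach()  # the full generated patch, including filled-in strokes

The same idea scales to whole lines of text by sliding a window over the image and completing one patch at a time.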
Answer 1 (score: 2)
Is your image only the result and not the source? Have you played with the blur parameters, e.g. 21, 21?
blur2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur2 = cv2.GaussianBlur(blur2, (21, 21), 0)
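A minimal sketch of how that larger blur could slot into the question's strengthen() pipeline; strengthen_blurred is a hypothetical helper, the 31 / 2 adaptive-threshold parameters are carried over from the question, and the 21x21 kernel size is the suggestion above, so all of these may need tuning for the actual image:

import cv2

def strengthen_blurred(img, ksize=21):
    """Variant of the question's strengthen() that uses a larger Gaussian kernel."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    blurred = cv2.GaussianBlur(gray, (ksize, ksize), 0)
    return cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 31, 2)

# e.g. print(OCR(strengthen_blurred(img)))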