經過一個月的研究、opencv、能處理圖片,並半吊子識別。
暫時還是沒有資料,不能實現需求護照識別,對身份證的識別,又因為中文的原因,識別率不高。其次針對護照處理圖片的引數需要動態配置。
對於只熟悉Java的開發者,查詢資料學習OpenCV有些困難:網上大多都是C++版本的,並且2.4與3.4版本的庫有改動。
記錄一下opencv的基本操作,一起學習。並提供一種ocr思路
一、處理身份證案例:
Opencv獲取身份證號碼區域的示例程式碼 m.jb51.net/show/144805
1、對圖片進行降噪以及二值化,凸顯內容區域
2、對圖片進行輪廓檢測
3、對輪廓結果進行分析
4、剪裁指定區域
複製程式碼
- 處理過程
- 灰度圖
- 高斯模糊降噪 GaussianBlur
- 二值化 threshold
- 中值濾波降噪 medianBlur
- 腐蝕操作 erode
二、一般處理程式碼
- 2.4版本
// Load the image (OpenCV 2.4 API: Highgui; in 3.x this moved to Imgcodecs)
Mat templateImage = Highgui.imread(templateFilePath, Highgui.CV_LOAD_IMAGE_COLOR);
// Convert to grayscale
Imgproc.cvtColor(img2, img2, Imgproc.COLOR_BGR2GRAY);
// Gaussian blur (noise reduction)
Imgproc.GaussianBlur(img2, img2, new Size(3,3), 0);
// Median blur (noise reduction)
Imgproc.medianBlur(img2,img2,3);
// Erode (morphological erosion with a 14x14 kernel)
Imgproc.erode(originalImage, originalImage, new Mat(14, 14, 0));
// Adaptive threshold binarization (threshold adjusts per local neighborhood)
Imgproc.adaptiveThreshold(img2, img2, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 33, 25);
// Keep only dark (near-black) pixels via an intensity range mask
// NOTE(review): despite the name imgHSV, the conversion below is BGR->GRAY, not BGR->HSV — confirm intent
Mat imgHSV = new Mat(img.rows(), img.cols(), CvType.CV_8UC3);
Imgproc.cvtColor(img, imgHSV, Imgproc.COLOR_BGR2GRAY);
Scalar minValues = new Scalar(0, 0, 0);
Scalar maxValues = new Scalar(107, 107, 107);
Mat mask = new Mat();
Core.inRange(imgHSV, minValues, maxValues, mask);
// Contour detection, bounding-rectangle extraction, and annotation on the image
List<MatOfPoint> contours=new ArrayList<>();
Mat mat=new Mat();
Imgproc.findContours(originalImage1,contours,mat,Imgproc.RETR_LIST,Imgproc.CHAIN_APPROX_NONE);
Mat originalImage12=originalImage1;
for (int i = 0; i < contours.size(); i++) {
Rect rect = Imgproc.boundingRect(contours.get(i));
Core.rectangle(originalImage12, rect.tl(), rect.br(), new Scalar(255, 0, 255));
// NOTE(review): the submat result is discarded here — presumably the crop was meant to be kept/saved
originalImage12.submat(rect);
}
Highgui.imwrite(Imginfo.PATH_CACHE+UUID.randomUUID().toString()+Imginfo.JPG_SUFFIX, originalImage12);
複製程式碼
三、圖形匹配,仿射變換、旋轉
package com.cyd.ocr.passportocr;
import com.sun.image.codec.jpeg.JPEGCodec;
import com.sun.image.codec.jpeg.JPEGEncodeParam;
import com.sun.image.codec.jpeg.JPEGImageEncoder;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;
import org.opencv.features2d.*;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.*;
import java.util.LinkedList;
import java.util.List;
/**
*
* 根據特徵點,匹配模板,旋轉擷取,根據比例特定區域進行識別。
*
* 未改進點:只過濾出黑色進行識別。
* @author chenyd
* @date 2018/10/7 18:34
*/
public class test_ocr {
// DPI value stamped into the cropped JPEG's metadata before handing it to Tesseract.
public static int DPI = 300;
private float nndrRatio = 0.7f;// NNDR (nearest-neighbor distance ratio) test threshold; 0.7 is the conventional default and may be tuned
private int matchesPointCount = 0;
public float getNndrRatio() {
return nndrRatio;
}
public void setNndrRatio(float nndrRatio) {
this.nndrRatio = nndrRatio;
}
public int getMatchesPointCount() {
return matchesPointCount;
}
public void setMatchesPointCount(int matchesPointCount) {
this.matchesPointCount = matchesPointCount;
}
/**
 * Locates the template image inside the original image using SURF features.
 * On success (>= 4 good matches) it: computes the homography, projects the
 * template corners into the original, estimates and corrects the skew angle
 * (recursing on the rotated image while the angle exceeds 1 degree), crops the
 * matched region, passes it to {@link #subTarget(Mat)} for OCR, and writes
 * several debug images to hard-coded Windows paths.
 * NOTE(review): output paths are machine-specific (C:\Users\chenyd\...) — should be configurable.
 *
 * @param templateImage template image (BGR)
 * @param originalImage scene image to search in (BGR); drawn on in place
 */
public void matchImage(Mat templateImage, Mat originalImage) {
MatOfKeyPoint templateKeyPoints = new MatOfKeyPoint();
// Use the SURF algorithm for feature-point detection
FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
// Detect the template image's key points
featureDetector.detect(templateImage, templateKeyPoints);
// Compute descriptors for the template image's key points
MatOfKeyPoint templateDescriptors = new MatOfKeyPoint();
DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
System.out.println("提取模板圖的特徵點");
descriptorExtractor.compute(templateImage, templateKeyPoints, templateDescriptors);
// Render the template's key points onto a debug image
Mat outputImage = new Mat(templateImage.rows(), templateImage.cols(), Highgui.CV_LOAD_IMAGE_COLOR);
System.out.println("在圖片上顯示提取的特徵點");
Features2d.drawKeypoints(templateImage, templateKeyPoints, outputImage, new Scalar(255, 0, 0), 0);
// Detect and describe the original (scene) image's key points
MatOfKeyPoint originalDescriptors = new MatOfKeyPoint();
MatOfKeyPoint originalKeyPoints = new MatOfKeyPoint();
featureDetector.detect(originalImage, originalKeyPoints);
System.out.println("提取原圖的特徵點");
descriptorExtractor.compute(originalImage, originalKeyPoints, originalDescriptors);
List<MatOfDMatch> matches = new LinkedList();
DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
System.out.println("尋找最佳匹配");
/**
 * knnMatch finds the best matches within the given descriptor sets.
 * With K = 2 each query descriptor yields its two closest neighbors; the ratio
 * of closest to second-closest distance is compared against nndrRatio, and only
 * matches passing that ratio test are kept (Lowe's ratio test).
 */
descriptorMatcher.knnMatch(templateDescriptors, originalDescriptors, matches, 2);
System.out.println("計算匹配結果");
LinkedList<DMatch> goodMatchesList = new LinkedList();
// Filter the match results by distance (ratio test)
// NOTE(review): dmatcharray[1] assumes every match has 2 neighbors; with very few
// scene descriptors this could throw ArrayIndexOutOfBoundsException — confirm.
matches.forEach(match -> {
DMatch[] dmatcharray = match.toArray();
DMatch m1 = dmatcharray[0];
DMatch m2 = dmatcharray[1];
if (m1.distance <= m2.distance * nndrRatio) {
goodMatchesList.addLast(m1);
}
});
matchesPointCount = goodMatchesList.size();
// With at least 4 good matches the template is considered present in the
// original image (4 is the minimum for a homography; the threshold is tunable)
if (matchesPointCount >= 4) {
System.out.println("模板圖在原圖匹配成功!");
List<KeyPoint> templateKeyPointList = templateKeyPoints.toList();
List<KeyPoint> originalKeyPointList = originalKeyPoints.toList();
LinkedList<Point> objectPoints = new LinkedList();
LinkedList<Point> scenePoints = new LinkedList();
// queryIdx indexes the template (query) key points, trainIdx the scene ones
goodMatchesList.forEach(goodMatch -> {
objectPoints.addLast(templateKeyPointList.get(goodMatch.queryIdx).pt);
scenePoints.addLast(originalKeyPointList.get(goodMatch.trainIdx).pt);
});
MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
objMatOfPoint2f.fromList(objectPoints);
MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
scnMatOfPoint2f.fromList(scenePoints);
// Use findHomography (RANSAC, reprojection threshold 3) to estimate the
// transform between the matched key points
Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);
/**
 * Perspective transformation projects the image onto a new viewing plane,
 * also known as projective mapping.
 */
Mat templateCorners = new Mat(4, 1, CvType.CV_32FC2);
Mat templateTransformResult = new Mat(4, 1, CvType.CV_32FC2);
templateCorners.put(0, 0, new double[]{0, 0});
templateCorners.put(1, 0, new double[]{templateImage.cols(), 0});
templateCorners.put(2, 0, new double[]{templateImage.cols(), templateImage.rows()});
templateCorners.put(3, 0, new double[]{0, templateImage.rows()});
// Use perspectiveTransform to project the template corners into the original image
Core.perspectiveTransform(templateCorners, templateTransformResult, homography);
// The four corners of the matched rectangle (A=top-left, B=top-right, C=bottom-right, D=bottom-left)
double[] pointA = templateTransformResult.get(0, 0);
double[] pointB = templateTransformResult.get(1, 0);
double[] pointC = templateTransformResult.get(2, 0);
double[] pointD = templateTransformResult.get(3, 0);
// System.out.println(String.format("【%s,%s】,【%s,%s】,【%s,%s】,【%s,%s】",pointA[0],pointA[1],pointB[0],pointB[1], pointC[0],pointC[1],pointD[0],pointD[1]));
// Distance between the top-left and top-right corners
double range=getDistance(new Point(pointA),new Point(pointB));
// Skew angle of the top edge: asin(vertical offset / edge length), in degrees
double sina=Math.abs(pointA[1]-pointB[1]);
double jd=Math.asin(sina/range)/Math.PI*180;
System.out.println("旋轉角度:"+jd);
Mat jdmat=rotate3(originalImage,-jd);
String xz="C:\\Users\\chenyd\\Desktop\\img\\idcode\\jdmat.jpg";
Highgui.imwrite(xz, jdmat);
// If skew exceeds 1 degree, re-run the match on the de-skewed image
// (recursion terminates once the residual angle drops to <= 1 degree)
if(jd > 1){
System.out.println("匹配旋轉之後的圖片");
matchImage(templateImage,jdmat);
return;
}
// Row/column bounds of the sub-matrix (region of interest) to extract
int rowStart = (int) pointA[1];
int rowEnd = (int) pointC[1];
int colStart = (int) pointD[0];
int colEnd = (int) pointB[0];
// Swap bounds if the projected corners came out inverted
int temp=0;
if(rowStart>rowEnd){
temp=rowStart;
rowStart=rowEnd;
rowEnd=temp;
}
if(colStart>colEnd){
temp=colStart;
colStart=colEnd;
colEnd=temp;
}
// TODO: bounds must also be clamped to >= 0 (and <= image size) before submat,
// otherwise out-of-frame corners will throw
System.out.println(String.format("%s,%s,%s,%s",rowStart, rowEnd, colStart, colEnd));
Mat subMat = originalImage.submat(rowStart, rowEnd, colStart, colEnd);
Highgui.imwrite("C:\\Users\\chenyd\\Desktop\\img\\idcode\\match.jpg", subMat);
subTarget(subMat);
// Outline the matched region on the original image with four lines
Core.line(originalImage, new Point(pointA), new Point(pointB), new Scalar(0, 255, 0), 4);// top A->B
Core.line(originalImage, new Point(pointB), new Point(pointC), new Scalar(0, 255, 0), 4);// right B->C
Core.line(originalImage, new Point(pointC), new Point(pointD), new Scalar(0, 255, 0), 4);// bottom C->D
Core.line(originalImage, new Point(pointD), new Point(pointA), new Scalar(0, 255, 0), 4);// left D->A
MatOfDMatch goodMatches = new MatOfDMatch();
goodMatches.fromList(goodMatchesList);
// Side-by-side debug image of template/original with the good matches drawn
Mat matchOutput = new Mat(originalImage.rows() * 2, originalImage.cols() * 2, Highgui.CV_LOAD_IMAGE_COLOR);
Features2d.drawMatches(templateImage, templateKeyPoints, originalImage, originalKeyPoints, goodMatches, matchOutput, new Scalar(0, 255, 0), new Scalar(255, 0, 0), new MatOfByte(), 2);
Highgui.imwrite("C:\\Users\\chenyd\\Desktop\\img\\idcode\\matchOutput.jpg", matchOutput);
Highgui.imwrite("C:\\Users\\chenyd\\Desktop\\img\\idcode\\originalImage.jpg", originalImage);
} else {
System.out.println("模板圖不在原圖中!");
}
Highgui.imwrite("C:\\Users\\chenyd\\Desktop\\img\\idcode\\outputImage.jpg", outputImage);
}
/**
 * Extracts a fixed field region from the matched crop and OCRs it.
 * Binarizes the crop, scales hard-coded template-relative coordinates to the
 * crop's actual size, cuts out that region, stamps {@link #DPI} into the saved
 * JPEG, and runs Tesseract on it.
 *
 * @param originalImage the matched crop (converted to grayscale in place)
 */
private void subTarget(Mat originalImage) {
//Imgproc.GaussianBlur(originalImage, originalImage, new Size(3,3), 0);
//Imgproc.medianBlur(originalImage,originalImage,3);
Imgproc.cvtColor(originalImage, originalImage, Imgproc.COLOR_BGR2GRAY);
Imgproc.adaptiveThreshold(originalImage, originalImage, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 45, 55);
Highgui.imwrite("C:\\Users\\chenyd\\Desktop\\img\\idcode\\medianBlurMat.jpg", originalImage);
int imgrow=originalImage.rows();
int imgcol=originalImage.cols();
double[] tarT={930,550};// reference point coordinates (x, y) in the template
//PR
// double[] tarA={301,108};
// double[] tarC={365,138};
//NAME
double[] tarA={307,163};
double[] tarC={492,193};
//DATE
// double[] tarA={300,422};
// double[] tarC={660,460};
/* double[] tarT={1054,553};
//NAME
double[] tarA={314,173};
double[] tarC={607,216};
//NAME
// double[] tarA={515,116};
// double[] tarC={680,145};*/
// Scale the recognition region's top-left (A) and bottom-right (C) corners
// from template coordinates to the crop's size.
// NOTE(review): the original row/column labels were swapped — index [1] is y
// (rows) and [0] is x (columns), as the arithmetic below shows.
int targRowS=(int) (tarA[1]/tarT[1]*imgrow);// start row
int targRowE=(int) (tarC[1]/tarT[1]*imgrow);
int targColS=(int) (tarA[0]/tarT[0]*imgcol);// start column
int targColE=(int) (tarC[0]/tarT[0]*imgcol);
Mat subMat1 = originalImage.submat( targRowS,targRowE, targColS, targColE);
String file ="C:\\Users\\chenyd\\Desktop\\img\\idcode\\subTarget.jpg";
Highgui.imwrite(file, subMat1);
//BufferedImage bi=Mat2Img(subMat1,".jpg");
handleDpi( new File(file), DPI, DPI);
tesseract(file);
}
/**
 * Re-encodes the given JPEG file in place, stamping the X/Y pixel density
 * (dots per inch) into its metadata so Tesseract sees the intended DPI.
 * NOTE(review): uses the internal com.sun.image.codec.jpeg API, which was
 * removed in Java 9+ — this only works on a Java 8 (or earlier) Oracle JDK.
 *
 * @param file JPEG file to rewrite in place
 * @param xDensity horizontal density in DPI
 * @param yDensity vertical density in DPI
 */
public static void handleDpi(File file, int xDensity, int yDensity) {
FileOutputStream out=null;
try {
BufferedImage image = ImageIO.read(file);
out=new FileOutputStream(file);
JPEGImageEncoder jpegEncoder = JPEGCodec.createJPEGEncoder(out);
JPEGEncodeParam jpegEncodeParam = jpegEncoder.getDefaultJPEGEncodeParam(image);
jpegEncodeParam.setDensityUnit(JPEGEncodeParam.DENSITY_UNIT_DOTS_INCH);
jpegEncoder.setJPEGEncodeParam(jpegEncodeParam);
//jpegEncodeParam.setQuality(0.75f, false);
//jpegEncodeParam.setQuality(2f, false);
jpegEncodeParam.setXDensity(xDensity);
jpegEncodeParam.setYDensity(yDensity);
jpegEncoder.encode(image, jpegEncodeParam);
image.flush();
} catch (IOException e) {
e.printStackTrace();
}finally {
if(out!=null){
try {
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
/**
 * Euclidean distance between two points.
 *
 * @param p first point
 * @param p2 second point
 * @return sqrt(dx^2 + dy^2)
 */
public double getDistance(Point p,Point p2){
double _x = Math.abs(p.x - p2.x);
double _y = Math.abs(p.y - p2.y);
return Math.sqrt(_x*_x+_y*_y);
}
/**
 * Rotates the image by the given angle (in degrees) around its center,
 * enlarging the canvas so no content is clipped.
 *
 * @param splitImage source image
 * @param angle rotation angle in degrees
 * @return the rotated image on an enlarged canvas
 */
public static Mat rotate3(Mat splitImage, double angle)
{
double thera = angle * Math.PI / 180;
double a = Math.sin(thera);
double b = Math.cos(thera);
int wsrc = splitImage.width();
int hsrc = splitImage.height();
// Destination canvas size that fully contains the rotated source
int wdst = (int) (hsrc * Math.abs(a) + wsrc * Math.abs(b));
int hdst = (int) (wsrc * Math.abs(a) + hsrc * Math.abs(b));
Mat imgDst = new Mat(hdst, wdst, splitImage.type());
Point pt = new Point(splitImage.cols() / 2, splitImage.rows() / 2);
// Build the affine rotation matrix around the image center
Mat affineTrans = Imgproc.getRotationMatrix2D(pt, angle, 1.0);
// Adjust the translation (third) column so the result is centered on the larger canvas
affineTrans.put(0, 2, affineTrans.get(0, 2)[0] + (wdst - wsrc) / 2);
affineTrans.put(1, 2, affineTrans.get(1, 2)[0] + (hdst - hsrc) / 2);
Imgproc.warpAffine(splitImage, imgDst, affineTrans, imgDst.size());
return imgDst;
}
/**
 * Converts an OpenCV Mat into a BufferedImage by encoding it to bytes and
 * decoding with ImageIO.
 *
 * @param mat source matrix
 * @param fileExtension encoding format, e.g. ".jpg"
 * @return the decoded image, or null if decoding failed
 */
public static BufferedImage Mat2Img(Mat mat, String fileExtension) {
MatOfByte mob = new MatOfByte();
Highgui.imencode(fileExtension, mat, mob);
byte[] byteArray = mob.toArray();
BufferedImage bufImage = null;
try {
InputStream in = new ByteArrayInputStream(byteArray);
bufImage = ImageIO.read(in);
} catch (Exception e) {
e.printStackTrace();
}
return bufImage;
}
/**
 * Runs the external `tesseract` binary on the given image file and returns the
 * recognized text (all output lines concatenated).
 * NOTE(review): bufReader is never closed — resource leak. Also the command is
 * built by string concatenation, so paths containing spaces will break;
 * ProcessBuilder with an argument list would be safer.
 *
 * @param file1 path of the image to recognize
 * @return recognized text, or "" on failure
 */
public String tesseract(String file1) {
String result = "";
String imgPath=file1;
BufferedReader bufReader = null;
try {
String outPath = imgPath.substring(0, imgPath.lastIndexOf("."));
Runtime runtime = Runtime.getRuntime();
// --psm 7: treat the image as a single text line; eng language pack
String command = "tesseract" + " " + imgPath + " " + outPath +" -l eng --psm 7 ";
System.out.println(command);
Process ps = runtime.exec(command);
ps.waitFor();
// Read the output file Tesseract produced (outPath + ".txt")
File file = new File(outPath + ".txt");
bufReader = new BufferedReader(new FileReader(file));
String temp = "";
StringBuffer sb = new StringBuffer();
while ((temp = bufReader.readLine()) != null) {
sb.append(temp);
}
// Recognized text
result = sb.toString();
//if (!StringUtils.isEmpty(result))
// result = result.replaceAll(" ", "");
System.out.println("識別結果>>>>>>>>>: "+result);
} catch (Exception e) {
e.printStackTrace();
}
return result;
}
/**
 * No-op stub: the Swing-based debug window display is fully commented out.
 */
public static void imshow(Mat image, String windowName){
// try {
// UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
// } catch (ClassNotFoundException e) {
// e.printStackTrace();
// } catch (InstantiationException e) {
// e.printStackTrace();
// } catch (IllegalAccessException e) {
// e.printStackTrace();
// } catch (UnsupportedLookAndFeelException e) {
// e.printStackTrace();
// }
//
// JFrame jFrame = new JFrame(windowName);
// JLabel imageView = new JLabel();
// final JScrollPane imageScrollPane = new JScrollPane(imageView);
// imageScrollPane.setPreferredSize(new Dimension(500, 500)); // set window size
// jFrame.add(imageScrollPane, BorderLayout.CENTER);
// jFrame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
//
// Image loadedImage = Mat2BufferedImage(image);
// imageView.setIcon(new ImageIcon(loadedImage));
// jFrame.pack();
// jFrame.setLocationRelativeTo(null);
// jFrame.setVisible(true);
}
/**
 * Demo entry point: loads the native OpenCV library, reads a template and a
 * scene image from hard-coded paths, and runs the template match.
 */
public static void main(String[] args) {
// Load the native OpenCV library (must be on java.library.path)
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// String templateFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\gbc_1.jpg";
// String originalFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\GRC.jpg";
String templateFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\bhs_4.jpg";
String originalFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\1539152678458.jpg";
// String originalFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\BHS_3.jpg";
// String templateFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\SGP_M_1.jpg";
// String originalFilePath = "C:\\Users\\chenyd\\Desktop\\img\\idcode\\SGP_.jpg";
// Load the image files
Mat templateImage = Highgui.imread(templateFilePath, Highgui.CV_LOAD_IMAGE_COLOR);
Mat originalImage = Highgui.imread(originalFilePath, Highgui.CV_LOAD_IMAGE_COLOR);
test_ocr imageRecognition = new test_ocr();
imageRecognition.matchImage(templateImage, originalImage);
System.out.println("匹配的畫素點總數:" + imageRecognition.getMatchesPointCount());
}
}
複製程式碼
四、自定義影象識別思路
- 識別
識別文字使用開源技術:Tesseract。其中文識別率不高,需要下載chi_sim等中文語言資料檔案,並可針對中文自行訓練。
- 圖片處理
使用OpenCV處理圖片。網上資料大多是C++版本;就其Java繫結而言,2.4版本可以使用圖片匹配,3.4版本則會報錯,無法使用。
- opencv處理圖片,之後由tesseract識別。
- opencv處理流程
- 將需要識別的圖片製作成模板,扣去需要識別的資訊,保留不變的元素。
- 獲取base64圖片。
- 與模板匹配、並扣取匹配的原圖。
- 將匹配的原圖,過濾黑色,只保留文字
- 將過濾之後的圖,矩形匹配,並增加矩形過濾規則,保留符合要求的矩形
- 訂製模板中資訊的大概矩形位置,矩形匹配大致都落入到模板資訊區域,得到目標矩形座標
- 將扣取的原圖進行灰度、濾波、中值、二值化、並擷取目標矩形座標。