Reputation: 21
I read a tutorial on this site about correcting the skew angle of an image, but I don't understand how to convert this code to Java.
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = img.begin<uchar>();
cv::Mat_<uchar>::iterator end = img.end<uchar>();
for (; it != end; ++it)
    if (*it)                        // what is the meaning of this code (1)
        points.push_back(it.pos()); // what is the meaning of this code (2)
Please help me understand this code.
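For context: the loop visits every pixel of the binarized image; `if (*it)` is true for any non-zero (foreground) pixel, and `it.pos()` is that pixel's (x, y) coordinate, so the two lines collect the coordinates of all foreground pixels into `points`. In the Java bindings one common way to do the same step without an explicit loop is Core.findNonZero; a minimal sketch, assuming `img` is the same binarized single-channel Mat:
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;

Mat nonZero = new Mat();
Core.findNonZero(img, nonZero);              // fills nonZero with the (x, y) locations of all non-zero pixels
MatOfPoint points = new MatOfPoint(nonZero); // plays the role of std::vector<cv::Point> in the C++ code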
Upvotes: 0
Views: 3968
Reputation: 883
import android.graphics.Bitmap
import android.graphics.PointF
import android.util.Log
import org.opencv.android.Utils
import org.opencv.core.Mat
import org.opencv.core.Scalar
import org.opencv.core.Size
import org.opencv.imgproc.Imgproc
import kotlin.math.pow
import kotlin.math.sqrt

private fun main() {
    val bmp: Bitmap? = null // any bitmap (if you are working with a bitmap)
    var mRgba = Mat()       // otherwise you can use the Mat from onCameraFrame directly
    val mGray = Mat()
    val bmp32: Bitmap = bmp!!.copy(Bitmap.Config.ARGB_8888, true)
    Utils.bitmapToMat(bmp32, mRgba)
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_BGR2GRAY)
    mRgba = makeOrientationCorrection(mRgba, mGray) // here the actual magic starts
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_BGR2GRAY)
    val bmpOutX = Bitmap.createBitmap(
        mRgba.cols(),
        mRgba.rows(),
        Bitmap.Config.ARGB_8888
    )
    Utils.matToBitmap(mRgba, bmpOutX)
    binding.imagePreview.setImageBitmap(bmpOutX)
}
private fun makeOrientationCorrection(mRGBA: Mat, mGRAY: Mat): Mat {
    val dst = Mat()
    val cdst = Mat()
    val cdstP: Mat
    Imgproc.Canny(mGRAY, dst, 50.0, 200.0, 3, false)
    Imgproc.cvtColor(dst, cdst, Imgproc.COLOR_GRAY2BGR)
    cdstP = cdst.clone()
    val linesP = Mat()
    Imgproc.HoughLinesP(dst, linesP, 1.0, Math.PI / 180, 50, 50.0, 10.0)
    var biggestLineX1 = 0.0
    var biggestLineY1 = 0.0
    var biggestLineX2 = 0.0
    var biggestLineY2 = 0.0
    var biggestLine = 0.0
    // Draw every detected line segment (for visualisation/debugging).
    for (x in 0 until linesP.rows()) {
        val l = linesP[x, 0]
        Imgproc.line(
            cdstP, org.opencv.core.Point(l[0], l[1]),
            org.opencv.core.Point(l[2], l[3]),
            Scalar(0.0, 0.0, 255.0), 3, Imgproc.LINE_AA, 0)
    }
    // Find the longest roughly horizontal line segment.
    for (x in 0 until linesP.rows()) {
        val l = linesP[x, 0]
        val x1 = l[0]
        val y1 = l[1]
        val x2 = l[2]
        val y2 = l[3]
        val lineHeight = sqrt(((x2 - x1).pow(2.0)) + ((y2 - y1).pow(2.0)))
        if (biggestLine < lineHeight) {
            val angleOfRotationX1 = angleOf(PointF(x1.toFloat(), y1.toFloat()), PointF(x2.toFloat(), y2.toFloat()))
            Log.e("angleOfRotationX1", "$angleOfRotationX1")
            if (angleOfRotationX1 < 45.0 || angleOfRotationX1 > 270.0) {
                biggestLine = lineHeight
                if (angleOfRotationX1 < 45.0) {
                    biggestLineX1 = x1
                    biggestLineY1 = y1
                    biggestLineX2 = x2
                    biggestLineY2 = y2
                }
                if (angleOfRotationX1 > 270.0) {
                    biggestLineX1 = x2
                    biggestLineY1 = y2
                    biggestLineX2 = x1
                    biggestLineY2 = y1
                }
            }
        }
        if (x == linesP.rows() - 1) {
            Imgproc.line(
                cdstP, org.opencv.core.Point(biggestLineX1, biggestLineY1),
                org.opencv.core.Point(biggestLineX2, biggestLineY2),
                Scalar(255.0, 0.0, 0.0), 3, Imgproc.LINE_AA, 0)
        }
    }
    var angle = angleOf(PointF(biggestLineX1.toFloat(), biggestLineY1.toFloat()), PointF(biggestLineX2.toFloat(), biggestLineY2.toFloat()))
    Log.e("angleOfRotationX2", "$angle")
    angle -= (angle * 2) // negate the angle so the image is rotated back in the opposite direction
    return deskew(mRGBA, angle)
}
fun angleOf(p1: PointF, p2: PointF): Double {
    val deltaY = (p1.y - p2.y).toDouble()
    val deltaX = (p2.x - p1.x).toDouble()
    val result = Math.toDegrees(Math.atan2(deltaY, deltaX))
    return if (result < 0) 360.0 + result else result
}
private fun deskew(src: Mat, angle: Double): Mat {
    val center = org.opencv.core.Point((src.width() / 2).toDouble(), (src.height() / 2).toDouble())
    val scaleBy = if (angle < 0) {
        1.0 + ((0.5 * angle) / 45) // scale down by at most 0.50 (50%) based on the angle
    } else {
        1.0 - ((0.3 * angle) / 45) // scale down by at most 0.30 (30%) based on the angle
    }
    Log.e("scaleBy", "" + scaleBy)
    val rotImage = Imgproc.getRotationMatrix2D(center, angle, scaleBy)
    val size = Size(src.width().toDouble(), src.height().toDouble())
    Imgproc.warpAffine(src, src, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS)
    return src
}
Upvotes: 0
Reputation: 11
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
public class ValidateDocumentAlignment {

    public ValidateDocumentAlignment() {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public boolean isDocumentTiltAngleWithinThresholdLimit(File scannedDoc, int thresholdAngle) {
        int kernelSize = 3;
        int cannyLowerThreshold = 25;
        int cannyUpperThreshold = 50;
        Mat image = new Mat();
        Mat blur = new Mat();
        Mat edged = new Mat();
        Mat dilate = new Mat();
        Mat erode = new Mat();
        int maxValIdx = 0;
        double area = 0;
        List<MatOfPoint> contours = new ArrayList<>();

        // Load the scan in grayscale, binarize it and invert it so the content is white on black.
        Mat sourceImage = Imgcodecs.imread(scannedDoc.getPath(), Imgcodecs.IMREAD_GRAYSCALE);
        Imgproc.adaptiveThreshold(sourceImage, sourceImage, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 15, 40);
        Core.bitwise_not(sourceImage, sourceImage);

        // Close small gaps, blur, detect edges, then dilate/erode so the document outline forms one solid contour.
        Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(kernelSize, kernelSize));
        Imgproc.morphologyEx(sourceImage, image, Imgproc.MORPH_CLOSE, kernel);
        Imgproc.GaussianBlur(image, blur, new Size(7, 7), 0);
        Imgproc.Canny(blur, edged, cannyLowerThreshold, cannyUpperThreshold);
        Imgproc.dilate(edged, dilate, kernel, new Point(-1, -1), 6);
        Imgproc.erode(dilate, erode, kernel, new Point(-1, -1), 3);

        // Keep the contour with the largest bounding box (assumed to be the document itself).
        Imgproc.findContours(erode, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
            Rect rect = Imgproc.boundingRect(contours.get(contourIdx));
            if ((rect.height * rect.width) > area) {
                area = rect.height * rect.width;
                maxValIdx = contourIdx;
            }
        }

        // The angle of the minimum-area rectangle around that contour gives the skew angle.
        RotatedRect rotatedRect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(maxValIdx).toArray()));
        double skewAngle = rotatedRect.angle;
        int acuteAngle = (int) (skewAngle % 90);
        boolean isProperlyAligned = true;
        if (Math.abs(acuteAngle) > thresholdAngle && Math.abs(acuteAngle) < (90 - thresholdAngle)) {
            isProperlyAligned = false;
        }
        return isProperlyAligned;
    }
}
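A minimal usage sketch (the file path and the 5-degree threshold below are placeholders, and the OpenCV native library must be available on java.library.path):
File scannedDoc = new File("scanned_page.png"); // hypothetical input file
ValidateDocumentAlignment validator = new ValidateDocumentAlignment();
boolean aligned = validator.isDocumentTiltAngleWithinThresholdLimit(scannedDoc, 5); // tolerate up to ~5 degrees of skew
System.out.println(aligned ? "within threshold" : "skewed beyond threshold");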
Upvotes: 0
Reputation: 31
For OpenCV 3.2.0, here is a full Java translation of the C++ deskew code from https://felix.abecassis.me/2011/10/opencv-bounding-box-skew-angle/ (with minor modifications):
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

import java.util.ArrayList;
import java.util.List;

public Mat deskew(Mat src, double angle) {
    Point center = new Point(src.width() / 2, src.height() / 2);
    Mat rotImage = Imgproc.getRotationMatrix2D(center, angle, 1.0); // 1.0 means 100% scale
    Size size = new Size(src.width(), src.height());
    Imgproc.warpAffine(src, src, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS);
    return src;
}
public void computeSkew(String inFile, String outputFile) {
    // Load the image in grayscale
    Mat img = Imgcodecs.imread(inFile, Imgcodecs.IMREAD_GRAYSCALE);

    // Binarize it (use adaptive thresholding if necessary):
    // Imgproc.adaptiveThreshold(img, img, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 15, 40);
    Imgproc.threshold(img, img, 200, 255, Imgproc.THRESH_BINARY);

    // Invert the colors so that objects are white pixels and the background is black
    Core.bitwise_not(img, img);

    // Erode with a rectangle-shaped structuring element
    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.erode(img, img, element);

    // Find all white pixels: create an empty Mat and pass it to findNonZero
    Mat wLocMat = Mat.zeros(img.size(), img.type());
    Core.findNonZero(img, wLocMat);

    // Convert to MatOfPoint2f for use in the next step
    MatOfPoint matOfPoint = new MatOfPoint(wLocMat);
    MatOfPoint2f mat2f = new MatOfPoint2f();
    matOfPoint.convertTo(mat2f, CvType.CV_32FC2);

    // Get the rotated rect of the white pixels and draw it (for visualization)
    RotatedRect rotatedRect = Imgproc.minAreaRect(mat2f);
    Point[] vertices = new Point[4];
    rotatedRect.points(vertices);
    List<MatOfPoint> boxContours = new ArrayList<>();
    boxContours.add(new MatOfPoint(vertices));
    Imgproc.drawContours(img, boxContours, 0, new Scalar(128, 128, 128), -1);

    double resultAngle = rotatedRect.angle; // skew angle as reported by minAreaRect
    if (rotatedRect.size.width > rotatedRect.size.height) {
        rotatedRect.angle += 90.f;
    }
    // Or:
    // rotatedRect.angle = rotatedRect.angle < -45 ? rotatedRect.angle + 90.f : rotatedRect.angle;

    Mat result = deskew(Imgcodecs.imread(inFile), rotatedRect.angle);
    Imgcodecs.imwrite(outputFile, result);
}
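A minimal usage sketch, assuming the two methods above sit in a hypothetical SkewCorrector class and the file names are placeholders:
public static void main(String[] args) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // load the OpenCV native library once per process
    new SkewCorrector().computeSkew("skewed_scan.png", "deskewed_scan.png");
}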
Upvotes: 3