I am very excited to answer my own question! It turned out to be easy, but that is what happens when you are just starting out with something that lacks proper, relevant documentation.
I was trying hard to get the corners of a general (rotated) rectangle, something OpenCV does not provide directly, so it seemed almost impossible.
I followed the standard Stack Overflow code for detecting the largest square, and the corners can easily be read from the approxCurve itself.
//convert the image to grayscale
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
//run Canny edge detection (produces an 8-bit edge image)
Imgproc.Canny(imgSource, imgSource, 50, 50);
//apply Gaussian blur to smooth the broken edge lines
Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
//find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = -1;
int maxAreaIdx = -1;
Log.d("size",Integer.toString(contours.size()));
MatOfPoint temp_contour = contours.get(0); //start with the first contour; the loop below finds the largest quadrilateral
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint largest_contour = contours.get(0);
List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
//Imgproc.drawContours(imgSource,contours, -1, new Scalar(0, 255, 0), 1);
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
double contourarea = Imgproc.contourArea(temp_contour);
//compare this contour to the previous largest contour found
if (contourarea > maxArea) {
//check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
int contourSize = (int)temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize*0.05, true);
if (approxCurve_temp.total() == 4) {
maxArea = contourarea;
maxAreaIdx = idx;
approxCurve=approxCurve_temp;
largest_contour = temp_contour;
}
}
}
//convert the single-channel edge image back to a 3-channel image (only needed for the debug drawing calls below)
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
//reload the original, unprocessed image; the perspective warp is applied to this one
sourceImage = Highgui.imread(Environment.getExternalStorageDirectory().getAbsolutePath() + "/scan/p/1.jpg");
//read the four corner points of the quadrilateral from approxCurve
double[] temp_double;
temp_double = approxCurve.get(0,0);
Point p1 = new Point(temp_double[0], temp_double[1]);
//Core.circle(imgSource,p1,55,new Scalar(0,0,255));
//Imgproc.warpAffine(sourceImage, dummy, rotImage,sourceImage.size());
temp_double = approxCurve.get(1,0);
Point p2 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p2,150,new Scalar(255,255,255));
temp_double = approxCurve.get(2,0);
Point p3 = new Point(temp_double[0], temp_double[1]);
//Core.circle(imgSource,p3,200,new Scalar(255,0,0));
temp_double = approxCurve.get(3,0);
Point p4 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p4,100,new Scalar(0,0,255));
List<Point> source = new ArrayList<Point>();
source.add(p1);
source.add(p2);
source.add(p3);
source.add(p4);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Mat result=warp(sourceImage,startM);
return result;
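One caveat worth noting: approxPolyDP does not guarantee any particular corner order, so if the warped output ever comes out rotated or mirrored, you may want to sort the four points before building startM. A minimal sketch of such a helper (the name sortCorners is mine, not part of the code above; it needs java.util.* and org.opencv.core.Point):
private static List<Point> sortCorners(List<Point> corners) {
    //hypothetical helper: order the corners as top-left, bottom-left, bottom-right, top-right
    List<Point> sorted = new ArrayList<Point>(corners);
    //sort by y: the first two entries are the top corners, the last two the bottom corners
    Collections.sort(sorted, new Comparator<Point>() {
        public int compare(Point a, Point b) { return Double.compare(a.y, b.y); }
    });
    Point topLeft     = sorted.get(0).x < sorted.get(1).x ? sorted.get(0) : sorted.get(1);
    Point topRight    = sorted.get(0).x < sorted.get(1).x ? sorted.get(1) : sorted.get(0);
    Point bottomLeft  = sorted.get(2).x < sorted.get(3).x ? sorted.get(2) : sorted.get(3);
    Point bottomRight = sorted.get(2).x < sorted.get(3).x ? sorted.get(3) : sorted.get(2);
    //this order has to match the destination points used in warp() below
    return Arrays.asList(topLeft, bottomLeft, bottomRight, topRight);
}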
The function used for the perspective transform is given below:
public Mat warp(Mat inputMat,Mat startM) {
int resultWidth = 1000;
int resultHeight = 1000;
Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC4); //Mat takes (rows, cols, type), so height comes first
//destination corners: top-left, bottom-left, bottom-right, top-right (same order as the source corners)
Point ocvPOut1 = new Point(0, 0);
Point ocvPOut2 = new Point(0, resultHeight);
Point ocvPOut3 = new Point(resultWidth, resultHeight);
Point ocvPOut4 = new Point(resultWidth, 0);
List<Point> dest = new ArrayList<Point>();
dest.add(ocvPOut1);
dest.add(ocvPOut2);
dest.add(ocvPOut3);
dest.add(ocvPOut4);
Mat endM = Converters.vector_Point2f_to_Mat(dest);
Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
Imgproc.warpPerspective(inputMat,
outputMat,
perspectiveTransform,
new Size(resultWidth, resultHeight),
Imgproc.INTER_CUBIC);
return outputMat;
}
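To display the warped result, something along these lines should work (just a sketch; the ImageView name imageView is assumed, and Utils is org.opencv.android.Utils):
Mat warped = warp(sourceImage, startM);
//convert the warped Mat to a Bitmap so it can be shown in an ImageView
Bitmap bmp = Bitmap.createBitmap(warped.cols(), warped.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(warped, bmp);
imageView.setImageBitmap(bmp);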