/*
* Copyright (C) 2009-2018 Samuel Audet
*
* Licensed either under the Apache License, Version 2.0, or (at your option)
* under the terms of the GNU General Public License as published by
* the Free Software Foundation (subject to the "Classpath" exception),
* either version 2, or any later version (collectively, the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* http://www.gnu.org/licenses/
* http://www.gnu.org/software/classpath/license.html
*
* or as provided in the LICENSE.txt file that accompanied this code.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.net.URL;
import org.bytedeco.javacv.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.indexer.*;
import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.opencv.opencv_imgproc.*;
import org.bytedeco.opencv.opencv_calib3d.*;
import org.bytedeco.opencv.opencv_objdetect.*;
import static org.bytedeco.opencv.global.opencv_core.*;
import static org.bytedeco.opencv.global.opencv_imgproc.*;
import static org.bytedeco.opencv.global.opencv_calib3d.*;
import static org.bytedeco.opencv.global.opencv_objdetect.*;

public class Demo {
    public static void main(String[] args) throws Exception {
        String classifierName = null;
        if (args.length > 0) {
            classifierName = args[0];
        } else {
            URL url = new URL("https://raw.github.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_alt.xml");
            File file = Loader.cacheResource(url);
            classifierName = file.getAbsolutePath();
        }

        // We can "cast" Pointer objects by instantiating a new object of the desired class.
        // Note that `new CascadeClassifier(...)` never returns null in Java, so we call empty()
        // to detect a classifier file that failed to load.
        CascadeClassifier classifier = new CascadeClassifier(classifierName);
        if (classifier.empty()) {
            System.err.println("Error loading classifier file \"" + classifierName + "\".");
            System.exit(1);
        }

        // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio),
        // DC1394FrameGrabber, FlyCapture2FrameGrabber, OpenKinectFrameGrabber, OpenKinect2FrameGrabber,
        // RealSenseFrameGrabber, RealSense2FrameGrabber, PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber.
        FrameGrabber grabber = FrameGrabber.createDefault(0);
        grabber.start();
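
        // As a sketch only, one of the grabbers listed above could also be chosen explicitly instead
        // of createDefault(). The device number and the "video.mp4" filename are placeholders, and each
        // constructor requires its native backend to be available at runtime:
        // FrameGrabber grabber = new OpenCVFrameGrabber(0);           // grabs from a camera via opencv_videoio
        // FrameGrabber grabber = new FFmpegFrameGrabber("video.mp4"); // grabs from a file or stream via FFmpeg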

        // CanvasFrame, FrameGrabber, and FrameRecorder use Frame objects to communicate image data.
        // We need a FrameConverter to interface with other APIs (Android, Java 2D, JavaFX, Tesseract, OpenCV, etc).
        OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();

        // FAQ about IplImage and Mat objects from OpenCV:
        // - For custom raw processing of data, createBuffer() returns an NIO direct
        //   buffer wrapped around the memory pointed by imageData, and under Android we can
        //   also use that Buffer with Bitmap.copyPixelsFromBuffer() and copyPixelsToBuffer().
        // - To get a BufferedImage from an IplImage, or vice versa, we can chain calls to
        //   Java2DFrameConverter and OpenCVFrameConverter, one after the other.
        // - Java2DFrameConverter also has static copy() methods that we can use to transfer
        //   data more directly between BufferedImage and IplImage or Mat via Frame objects.
        Mat grabbedImage = converter.convert(grabber.grab());
        int height = grabbedImage.rows();
        int width = grabbedImage.cols();
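
        // A minimal sketch of the FAQ above, kept commented out so it does not affect the demo:
        // chaining Java2DFrameConverter with the OpenCV converter yields a BufferedImage, and
        // createBuffer() exposes the Mat's pixels as an NIO direct buffer (assuming an 8-bit image):
        // Java2DFrameConverter java2dConverter = new Java2DFrameConverter();
        // java.awt.image.BufferedImage bufferedImage = java2dConverter.convert(converter.convert(grabbedImage));
        // java.nio.ByteBuffer rawPixels = grabbedImage.createBuffer();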

        // Objects allocated with `new`, clone(), or a create*() factory method are automatically released
        // by the garbage collector, but may still be explicitly released by calling deallocate().
        // You shall NOT call cvReleaseImage(), cvReleaseMemStorage(), etc. on objects allocated this way.
        Mat grayImage = new Mat(height, width, CV_8UC1);
        Mat rotatedImage = grabbedImage.clone();

        // The OpenCVFrameRecorder class simply uses the VideoWriter of opencv_videoio,
        // but FFmpegFrameRecorder also exists as a more versatile alternative.
        FrameRecorder recorder = FrameRecorder.createDefault("output.avi", width, height);
        recorder.start();
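
        // For instance, the more versatile recorder mentioned above could be constructed directly
        // (a sketch only; "output.mp4" is a placeholder and FFmpeg must be available at runtime):
        // FrameRecorder recorder = new FFmpegFrameRecorder("output.mp4", width, height);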

        // CanvasFrame is a JFrame containing a Canvas component, which is hardware accelerated.
        // It can also switch into full-screen mode when called with a screenNumber.
        // We should also specify the relative monitor/camera response for proper gamma correction.
        CanvasFrame frame = new CanvasFrame("Some Title", CanvasFrame.getDefaultGamma() / grabber.getGamma());

        // Let's create some random 3D rotation...
        Mat randomR = new Mat(3, 3, CV_64FC1),
            randomAxis = new Mat(3, 1, CV_64FC1);
        // We can easily and efficiently access the elements of matrices and images
        // through an Indexer object with the set of get() and put() methods.
        DoubleIndexer Ridx = randomR.createIndexer(),
                      axisIdx = randomAxis.createIndexer();
        axisIdx.put(0, (Math.random() - 0.5) / 4,
                       (Math.random() - 0.5) / 4,
                       (Math.random() - 0.5) / 4);
        Rodrigues(randomAxis, randomR);
        double f = (width + height) / 2.0;
        Ridx.put(0, 2, Ridx.get(0, 2) * f);
        Ridx.put(1, 2, Ridx.get(1, 2) * f);
        Ridx.put(2, 0, Ridx.get(2, 0) / f);
        Ridx.put(2, 1, Ridx.get(2, 1) / f);
        System.out.println(Ridx);
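        // (The scaling above amounts to K * R * inv(K) with K = diag(f, f, 1), i.e. the homography
        // induced by rotating a camera with focal length f, which warpPerspective applies further below.)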

        // We can allocate native arrays using constructors taking an integer as argument.
        Point hatPoints = new Point(3);

        while (frame.isVisible() && (grabbedImage = converter.convert(grabber.grab())) != null) {
            // Let's try to detect some faces! but we need a grayscale image...
            cvtColor(grabbedImage, grayImage, CV_BGR2GRAY);
            RectVector faces = new RectVector();
            classifier.detectMultiScale(grayImage, faces);
            long total = faces.size();
            for (long i = 0; i < total; i++) {
                Rect r = faces.get(i);
                int x = r.x(), y = r.y(), w = r.width(), h = r.height();
                rectangle(grabbedImage, new Point(x, y), new Point(x + w, y + h), Scalar.RED, 1, CV_AA, 0);

                // To access or pass as argument the elements of a native array, call position() before.
                hatPoints.position(0).x(x - w / 10     ).y(y - h / 10);
                hatPoints.position(1).x(x + w * 11 / 10).y(y - h / 10);
                hatPoints.position(2).x(x + w / 2      ).y(y - h / 2 );
                fillConvexPoly(grabbedImage, hatPoints.position(0), 3, Scalar.GREEN, CV_AA, 0);
            }

            // Let's find some contours! but first some thresholding...
            threshold(grayImage, grayImage, 64, 255, CV_THRESH_BINARY);

            // To check if an output argument is null we may call either isNull() or equals(null).
            MatVector contours = new MatVector();
            findContours(grayImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
            long n = contours.size();
            for (long i = 0; i < n; i++) {
                Mat contour = contours.get(i);
                Mat points = new Mat();
                approxPolyDP(contour, points, arcLength(contour, true) * 0.02, true);
                drawContours(grabbedImage, new MatVector(points), -1, Scalar.BLUE);
            }

            warpPerspective(grabbedImage, rotatedImage, randomR, rotatedImage.size());

            Frame rotatedFrame = converter.convert(rotatedImage);
            frame.showImage(rotatedFrame);
            recorder.record(rotatedFrame);
        }
        frame.dispose();
        recorder.stop();
        grabber.stop();
    }
}