Calibrate works?

This commit is contained in:
jan 2016-07-10 00:36:44 -07:00
commit cc711c6343
1104 changed files with 636510 additions and 75 deletions

View file

@ -0,0 +1,37 @@
import gab.opencv.*;
import processing.video.*;

// Background-subtraction demo: learn the static background of a movie
// and outline the moving foreground regions in red.
Movie video;
OpenCV opencv;

void setup() {
  size(720, 480);
  video = new Movie(this, "street.mov");
  opencv = new OpenCV(this, 720, 480);
  // presumably args are history, nMixtures, backgroundRatio — confirm
  // against the gab.opencv startBackgroundSubtraction docs.
  opencv.startBackgroundSubtraction(5, 3, 0.5);
  video.loop();
  // NOTE(review): loop() already starts playback; play() looks redundant.
  video.play();
}

void draw() {
  image(video, 0, 0);
  // Feed the current frame into the background model.
  opencv.loadImage(video);
  opencv.updateBackground();
  // Dilate then erode to close small holes in the foreground mask.
  opencv.dilate();
  opencv.erode();
  noFill();
  stroke(255, 0, 0);
  strokeWeight(3);
  // Outline each detected foreground blob.
  for (Contour contour : opencv.findContours()) {
    contour.draw();
  }
}

// Standard Processing video callback: read each newly-available frame.
void movieEvent(Movie m) {
  m.read();
}

View file

@ -0,0 +1,22 @@
import gab.opencv.*;

// Mark the brightest pixel of a still image with a red circle.
OpenCV opencv;

void setup() {
  PImage src = loadImage("robot_light.jpg");
  // resize(800, 0): width 800, height scaled to keep the aspect ratio.
  src.resize(800, 0);
  size(src.width, src.height);
  opencv = new OpenCV(this, src);
}

void draw() {
  image(opencv.getOutput(), 0, 0);
  // max() returns the location of the brightest pixel in the working image.
  PVector loc = opencv.max();
  stroke(255, 0, 0);
  strokeWeight(4);
  noFill();
  ellipse(loc.x, loc.y, 10, 10);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

View file

@ -0,0 +1,17 @@
import gab.opencv.*;

// Interactive brightness adjustment: the mouse's X position maps
// linearly onto a brightness offset in [-255, 255].
PImage sourceImage;
OpenCV opencv;

void setup() {
  sourceImage = loadImage("test.jpg");
  size(sourceImage.width, sourceImage.height);
  opencv = new OpenCV(this, sourceImage);
}

void draw() {
  // Reload the untouched source every frame so adjustments don't accumulate.
  opencv.loadImage(sourceImage);
  int amount = (int)map(mouseX, 0, width, -255, 255);
  opencv.brightness(amount);
  image(opencv.getOutput(), 0, 0);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,25 @@
import gab.opencv.*;

// Detect the inner corners of a chessboard calibration pattern
// and mark each one with a red dot.
PImage src;
ArrayList<PVector> cornerPoints;
OpenCV opencv;

void setup() {
  src = loadImage("checkerboard.jpg");
  // resize(500, 0): width 500, height keeps the aspect ratio.
  src.resize(500, 0);
  size(src.width, src.height);
  opencv = new OpenCV(this, src);
  opencv.gray();
  // 9 x 6 is the grid of corners the detector should find.
  cornerPoints = opencv.findChessboardCorners(9, 6);
}

void draw() {
  image(opencv.getOutput(), 0, 0);
  fill(255, 0, 0);
  noStroke();
  for (PVector p : cornerPoints) {
    ellipse(p.x, p.y, 5, 5);
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 115 KiB

View file

@ -0,0 +1,46 @@
import gab.opencv.*;

// Split an image into its RGB and HSB channels and display them in a
// grid: source on top, tinted R/G/B in the middle row, H/S/V below.
OpenCV opencv;
PImage src, r, g, b, h, s, v;
int imgH, imgW;

void setup() {
  src = loadImage("green_object.png");
  src.resize(800, 0); // width 800, height keeps aspect ratio
  opencv = new OpenCV(this, src);
  size(int(opencv.width * 1.5), int(opencv.height * 1.5));

  // Each cell of the display grid is half the source size.
  imgH = src.height / 2;
  imgW = src.width / 2;

  // Grab the individual RGB channels as grayscale snapshots.
  r = opencv.getSnapshot(opencv.getR());
  g = opencv.getSnapshot(opencv.getG());
  b = opencv.getSnapshot(opencv.getB());

  // Switch the working color space to HSB and grab those channels too.
  opencv.useColor(HSB);
  h = opencv.getSnapshot(opencv.getH());
  s = opencv.getSnapshot(opencv.getS());
  v = opencv.getSnapshot(opencv.getV());
}

void draw() {
  background(0);
  noTint();
  image(src, imgW, 0, imgW, imgH);

  // Tint each grayscale channel image so it reads as its color.
  tint(255, 0, 0);
  image(r, 0, imgH, imgW, imgH);
  tint(0, 255, 0);
  image(g, imgW, imgH, imgW, imgH);
  tint(0, 0, 255);
  image(b, 2 * imgW, imgH, imgW, imgH);

  // HSB channels shown untinted on the bottom row.
  noTint();
  image(h, 0, 2 * imgH, imgW, imgH);
  image(s, imgW, 2 * imgH, imgW, imgH);
  image(v, 2 * imgW, 2 * imgH, imgW, imgH);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 MiB

View file

@ -0,0 +1,58 @@
import gab.opencv.*;
import org.opencv.core.Mat;
import org.opencv.calib3d.StereoBM;
import org.opencv.core.CvType;
import org.opencv.calib3d.StereoSGBM;

// Compute disparity ("depth") maps from a stereo image pair with two
// OpenCV matchers: semi-global block matching (SGBM) and block matching (BM).
OpenCV ocvL, ocvR;
PImage imgL, imgR, depth1, depth2;

void setup() {
  imgL = loadImage("scene_l.jpg");
  imgR = loadImage("scene_r.jpg");

  ocvL = new OpenCV(this, imgL);
  ocvR = new OpenCV(this, imgR);

  // 2x2 grid: the two sources on top, the two disparity maps below.
  size(ocvL.width * 2, ocvL.height * 2);

  // Stereo matching operates on grayscale images.
  ocvL.gray();
  ocvR.gray();

  Mat left = ocvL.getGray();
  Mat right = ocvR.getGray();

  Mat disparity = OpenCV.imitate(left);

  // NOTE(review): this is the old (pre-3.x) StereoSGBM constructor; the
  // positional arguments are the matcher's tuning parameters (first ones
  // are presumably minDisparity, numDisparities, blockSize — confirm
  // against the bundled OpenCV Java docs).
  StereoSGBM stereo = new StereoSGBM(0, 32, 3, 128, 256, 20, 16, 1, 100, 20, true);
  stereo.compute(left, right, disparity);

  Mat depthMat = OpenCV.imitate(left);
  disparity.convertTo(depthMat, depthMat.type());

  depth1 = createImage(depthMat.width(), depthMat.height(), RGB);
  ocvL.toPImage(depthMat, depth1);

  // Second pass with the simpler block matcher, reusing the same Mats.
  StereoBM stereo2 = new StereoBM();
  stereo2.compute(left, right, disparity);
  disparity.convertTo(depthMat, depthMat.type());

  depth2 = createImage(depthMat.width(), depthMat.height(), RGB);
  ocvL.toPImage(depthMat, depth2);
}

void draw() {
  image(imgL, 0, 0);
  image(imgR, imgL.width, 0);
  image(depth1, 0, imgL.height);
  image(depth2, imgL.width, imgL.height);

  fill(255, 0, 0);
  text("left", 10, 20);
  text("right", 10 + imgL.width, 20);
  text("stereo SGBM", 10, imgL.height + 20);
  text("stereo BM", 10 + imgL.width, imgL.height + 20);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

View file

@ -0,0 +1,51 @@
import gab.opencv.*;

// Show how dilation and erosion affect a binarized sketch, and how
// "dilate then erode" (morphological closing) fills small holes.
PImage src, dilated, eroded, both;
OpenCV opencv;

void setup() {
  src = loadImage("pen_sketch.jpg");
  src.resize(src.width/2, 0); // halve the width, height keeps aspect
  size(src.width*2, src.height*2);

  opencv = new OpenCV(this, src);

  // Dilate and Erode both need a binary image
  // So, we'll make it gray and threshold it.
  opencv.gray();
  opencv.threshold(100);
  // We'll also invert so that erosion eats away the lines
  // and dilation expands them (rather than vice-versa)
  opencv.invert();
  // save a snapshot to use in both operations
  // (note: src is reused here to hold the binarized image)
  src = opencv.getSnapshot();

  // erode and save snapshot for display
  opencv.erode();
  eroded = opencv.getSnapshot();

  // reload un-eroded image and dilate it
  opencv.loadImage(src);
  opencv.dilate();
  // save dilated version for display
  dilated = opencv.getSnapshot();
  // now erode on top of dilated version to close holes
  opencv.erode();
  both = opencv.getSnapshot();

  // Everything is precomputed in setup; draw once.
  noLoop();
}

void draw() {
  // 2x2 grid: binarized original, eroded, dilated, closed.
  image(src, 0, 0);
  image(eroded, src.width, 0);
  image(dilated, 0, src.height);
  image(both, src.width, src.height);

  fill(0, 255, 0);
  text("original", 20, 20);
  text("erode", src.width + 20, 20);
  text("dilate", 20, src.height+20);
  text("dilate then erode\n(close holes)", src.width+20, src.height+20);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

View file

@ -0,0 +1,25 @@
import gab.opencv.*;
import java.awt.Rectangle;

// Detect frontal faces in a still image and outline each match in green.
OpenCV opencv;
Rectangle[] faces;

void setup() {
  opencv = new OpenCV(this, "test.jpg");
  size(opencv.width, opencv.height);

  // Load the frontal-face Haar cascade and run detection once.
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  faces = opencv.detect();
}

void draw() {
  image(opencv.getInput(), 0, 0);

  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  for (Rectangle face : faces) {
    rect(face.x, face.y, face.width, face.height);
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.5 KiB

View file

@ -0,0 +1,40 @@
import gab.opencv.*;

// Compare basic threshold, blur, and adaptive threshold side by side.
OpenCV opencv;
PImage img, thresh, blur, adaptive;

void setup() {
  img = loadImage("test.jpg");
  size(img.width, img.height);
  opencv = new OpenCV(this, img);

  // Snapshot of the unfiltered working image, used to reset between
  // filters. (Presumably grayscale — the library works on a gray
  // channel unless useColor() is called; confirm.)
  PImage gray = opencv.getSnapshot();

  opencv.threshold(80);
  thresh = opencv.getSnapshot();

  opencv.loadImage(gray);
  opencv.blur(12);
  blur = opencv.getSnapshot();

  opencv.loadImage(gray);
  // adaptiveThreshold args: block size (odd), constant offset.
  opencv.adaptiveThreshold(591, 1);
  adaptive = opencv.getSnapshot();
}

void draw() {
  pushMatrix();
  scale(0.5); // four full-size images into a half-scale 2x2 grid
  image(img, 0, 0);
  image(thresh, img.width, 0);
  image(blur, 0, img.height);
  image(adaptive, img.width, img.height);
  popMatrix();

  fill(0);
  text("source", img.width/2 - 100, 20 );
  text("threshold", img.width - 100, 20 );
  text("blur", img.width/2 - 100, img.height/2 + 20 );
  text("adaptive threshold", img.width - 150, img.height/2 + 20 );
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,42 @@
import gab.opencv.*;

// Find contours in a thresholded image and draw each raw contour (green)
// together with its polygon approximation (red).
PImage src, dst;
OpenCV opencv;

ArrayList<Contour> contours;
ArrayList<Contour> polygons; // NOTE(review): declared but never used

void setup() {
  src = loadImage("test.jpg");
  // Window holds the two images side by side at half scale.
  size(src.width, src.height/2);
  opencv = new OpenCV(this, src);

  // Contour detection needs a binary image: gray then threshold.
  opencv.gray();
  opencv.threshold(70);
  dst = opencv.getOutput();

  contours = opencv.findContours();
  println("found " + contours.size() + " contours");
}

void draw() {
  scale(0.5);
  image(src, 0, 0);
  image(dst, src.width, 0);

  noFill();
  strokeWeight(3);

  for (Contour contour : contours) {
    stroke(0, 255, 0);
    contour.draw();

    // Overlay the simplified polygon on top of the raw contour.
    stroke(255, 0, 0);
    beginShape();
    for (PVector point : contour.getPolygonApproximation().getPoints()) {
      vertex(point.x, point.y);
    }
    endShape();
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,38 @@
import gab.opencv.*;

// Compare three edge detectors (Canny, Scharr, Sobel) in a 2x2 grid.
OpenCV opencv;
PImage src, canny, scharr, sobel;

void setup() {
  src = loadImage("test.jpg");
  size(src.width, src.height);

  opencv = new OpenCV(this, src);

  // Canny with low/high hysteresis thresholds of 20/75.
  opencv.findCannyEdges(20, 75);
  canny = opencv.getSnapshot();

  // Reload the source before each detector so results don't stack.
  opencv.loadImage(src);
  opencv.findScharrEdges(OpenCV.HORIZONTAL);
  scharr = opencv.getSnapshot();

  opencv.loadImage(src);
  // Sobel with dx=1, dy=0 (gradient in x).
  opencv.findSobelEdges(1, 0);
  sobel = opencv.getSnapshot();
}

void draw() {
  pushMatrix();
  scale(0.5); // four full-size images into a half-scale 2x2 grid
  image(src, 0, 0);
  image(canny, src.width, 0);
  image(scharr, 0, src.height);
  image(sobel, src.width, src.height);
  popMatrix();

  text("Source", 10, 25);
  text("Canny", src.width/2 + 10, 25);
  text("Scharr", 10, src.height/2 + 25);
  text("Sobel", src.width/2 + 10, src.height/2 + 25);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,47 @@
import gab.opencv.*;

// Draw gray, R, G, and B histograms (256 bins each) for a test image.
OpenCV opencv;
Histogram grayHist, rHist, gHist, bHist;
PImage img;

void setup() {
  size(640, 400);
  img = loadImage("test.jpg");
  opencv = new OpenCV(this, img);

  grayHist = opencv.findHistogram(opencv.getGray(), 256);
  rHist = opencv.findHistogram(opencv.getR(), 256);
  gHist = opencv.findHistogram(opencv.getG(), 256);
  bHist = opencv.findHistogram(opencv.getB(), 256);
}

void draw() {
  background(0);
  image(img, 10, 0, 300, 200);

  // Gray histogram, framed, at the top right.
  stroke(125); noFill();
  rect(320, 10, 310, 180);
  fill(125); noStroke();
  grayHist.draw(320, 10, 310, 180);

  // R / G / B histograms along the bottom, each framed in its own color.
  stroke(255, 0, 0); noFill();
  rect(10, height - 190, 200, 180);
  fill(255, 0, 0); noStroke();
  rHist.draw(10, height - 190, 200, 180);

  stroke(0, 255, 0); noFill();
  rect(220, height - 190, 200, 180);
  fill(0, 255, 0); noStroke();
  gHist.draw(220, height - 190, 200, 180);

  stroke(0, 0, 255); noFill();
  rect(430, height - 190, 200, 180);
  fill(0, 0, 255); noStroke();
  bHist.draw(430, height - 190, 200, 180);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,107 @@
/**
 * HSVColorTracking
 * Greg Borenstein
 * https://github.com/atduskgreg/opencv-processing-book/blob/master/code/hsv_color_tracking/HSVColorTracking/HSVColorTracking.pde
 *
 * Modified by Jordi Tost @jorditost (color selection)
 *
 * University of Applied Sciences Potsdam, 2014
 *
 * Tracks the largest object whose hue falls inside [rangeLow, rangeHigh];
 * click a pixel to retarget the filter to that pixel's hue.
 */
import gab.opencv.*;
import processing.video.*;
import java.awt.Rectangle;

Capture video;
OpenCV opencv;
PImage src, colorFilteredImage;
ArrayList<Contour> contours;

// <1> Set the range of Hue values for our filter
int rangeLow = 20;
int rangeHigh = 35;

void setup() {
  video = new Capture(this, 640, 480);
  video.start();

  opencv = new OpenCV(this, video.width, video.height);
  contours = new ArrayList<Contour>();

  // Side by side: camera image on the left, filtered mask on the right.
  size(2*opencv.width, opencv.height, P2D);
}

void draw() {
  // Read last captured frame
  if (video.available()) {
    video.read();
  }

  // <2> Load the new frame of our movie in to OpenCV
  opencv.loadImage(video);

  // Tell OpenCV to use color information
  opencv.useColor();
  src = opencv.getSnapshot();

  // <3> Tell OpenCV to work in HSV color space.
  opencv.useColor(HSB);

  // <4> Copy the Hue channel of our image into
  //     the gray channel, which we process.
  opencv.setGray(opencv.getH().clone());

  // <5> Filter the image based on the range of
  //     hue values that match the object we want to track.
  opencv.inRange(rangeLow, rangeHigh);

  // <6> Get the processed image for reference.
  colorFilteredImage = opencv.getSnapshot();

  ///////////////////////////////////////////
  // We could process our image here!
  // See ImageFiltering.pde
  ///////////////////////////////////////////

  // <7> Find contours in our range image.
  //     Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);

  // <8> Display background images
  image(src, 0, 0);
  image(colorFilteredImage, src.width, 0);

  // <9> Check to make sure we've found any contours
  if (contours.size() > 0) {
    // <9> Get the first contour, which will be the largest one
    Contour biggestContour = contours.get(0);

    // <10> Find the bounding box of the largest contour,
    //      and hence our object.
    Rectangle r = biggestContour.getBoundingBox();

    // <11> Draw the bounding box of our object
    noFill();
    strokeWeight(2);
    stroke(255, 0, 0);
    rect(r.x, r.y, r.width, r.height);

    // <12> Draw a dot in the middle of the bounding box, on the object.
    noStroke();
    fill(255, 0, 0);
    ellipse(r.x + r.width/2, r.y + r.height/2, 30, 30);
  }
}

// Retarget the hue filter to the clicked pixel's hue +/- 5.
void mousePressed() {
  color c = get(mouseX, mouseY);
  println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c));

  // Remap from Processing's 0-255 hue scale to the 0-180 range used here.
  int hue = int(map(hue(c), 0, 255, 0, 180));
  println("hue to detect: " + hue);

  rangeLow = hue - 5;
  rangeHigh = hue + 5;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 596 KiB

View file

@ -0,0 +1,61 @@
import gab.opencv.*;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.CvType;
import org.opencv.imgproc.Imgproc;

// Skin detection via a Cb/Cr histogram mask: an elliptical region of the
// 256x256 Cb-Cr plane is marked as "skin"; every pixel whose YCrCb chroma
// falls inside the ellipse is painted white, everything else black.
OpenCV opencv;
PImage src, dst, hist, histMask;
Mat skinHistogram;

void setup(){
  src = loadImage("test.jpg");
  src.resize(src.width/2, 0);
  size(src.width*2 + 256, src.height);
  // third argument is: useColor
  opencv = new OpenCV(this, src, true);

  // Build the skin mask: a filled ellipse in the 256x256 Cb/Cr plane
  // (center, axes, and angle chosen empirically).
  skinHistogram = Mat.zeros(256, 256, CvType.CV_8UC1);
  Core.ellipse(skinHistogram, new Point(113.0, 155.6), new Size(40.0, 25.2), 43.0, 0.0, 360.0, new Scalar(255, 255, 255), Core.FILLED);

  histMask = createImage(256, 256, ARGB);
  opencv.toPImage(skinHistogram, histMask);
  hist = loadImage("cb-cr.png");
  hist.blend(histMask, 0, 0, 256, 256, 0, 0, 256, 256, ADD);

  dst = opencv.getOutput();
  dst.loadPixels();

  // PERF: allocate the 1x1 conversion Mats ONCE outside the loop.
  // The original created two new Mats per pixel — hundreds of thousands
  // of native allocations for a typical image.
  Mat input = new Mat(new Size(1, 1), CvType.CV_8UC3);
  Mat output = opencv.imitate(input);

  for(int i = 0; i < dst.pixels.length; i++){
    input.setTo(colorToScalar(dst.pixels[i]));
    // Convert the single pixel to YCrCb; index 1/2 of the result are the
    // chroma components looked up in the skin histogram.
    Imgproc.cvtColor(input, output, Imgproc.COLOR_BGR2YCrCb);
    double[] inputComponents = output.get(0, 0);
    if(skinHistogram.get((int)inputComponents[1], (int)inputComponents[2])[0] > 0){
      dst.pixels[i] = color(255); // classified as skin
    } else {
      dst.pixels[i] = color(0);   // not skin
    }
  }

  dst.updatePixels();
}

// Pack a Processing color into an OpenCV Scalar in BGR channel order.
Scalar colorToScalar(color c){
  return new Scalar(blue(c), green(c), red(c));
}

void draw(){
  image(src, 0, 0);
  image(dst, src.width, 0);
  image(hist, src.width*2, 0);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,38 @@
import gab.opencv.*;

// Hough line detection on a film scan: highlight lines near 0 degrees
// in green and lines near 90 degrees in red.
OpenCV opencv;
ArrayList<Line> lines;

void setup() {
  PImage src = loadImage("film_scan.jpg");
  src.resize(0, 800); // height 800, width keeps the aspect ratio
  size(src.width, src.height);

  opencv = new OpenCV(this, src);
  // Line detection needs an edge image first.
  opencv.findCannyEdges(20, 75);

  // Find lines with Hough line detection
  // Arguments are: threshold, minLineLength, maxLineGap
  lines = opencv.findLines(100, 30, 20);
}

void draw() {
  image(opencv.getOutput(), 0, 0);
  strokeWeight(3);

  for (Line line : lines) {
    // lines include angle in radians, measured in double precision
    // so we can select out vertical and horizontal lines
    // They also include "start" and "end" PVectors with the position
    if (line.angle >= radians(0) && line.angle < radians(1)) {
      stroke(0, 255, 0);
      line(line.start.x, line.start.y, line.end.x, line.end.y);
    }

    if (line.angle > radians(89) && line.angle < radians(91)) {
      stroke(255, 0, 0);
      line(line.start.x, line.start.y, line.end.x, line.end.y);
    }
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

View file

@ -0,0 +1,64 @@
import gab.opencv.*;

// Interactive hue-range filter: move the mouse to slide the
// [lowerb, upperb] window; hold any key while moving to adjust
// only the upper bound.
PImage img;
OpenCV opencv;
Histogram histogram;

int lowerb = 50;
int upperb = 100;

void setup() {
  img = loadImage("colored_balls.jpg");
  opencv = new OpenCV(this, img);
  size(opencv.width, opencv.height);
  opencv.useColor(HSB);
}

void draw() {
  opencv.loadImage(img);
  image(img, 0, 0);

  // Work on the hue channel, then keep only pixels inside the range.
  opencv.setGray(opencv.getH().clone());
  opencv.inRange(lowerb, upperb);
  histogram = opencv.findHistogram(opencv.getH(), 255);

  // Thresholded result shown small in the lower-right corner.
  image(opencv.getOutput(), 3*width/4, 3*height/4, width/4, height/4);

  noStroke(); fill(0);
  histogram.draw(10, height - 230, 400, 200);
  noFill(); stroke(0);
  line(10, height-30, 410, height-30);

  text("Hue", 10, height - (textAscent() + textDescent()));

  // Map the selected bounds onto the 400px histogram axis and mark them.
  float lb = map(lowerb, 0, 255, 0, 400);
  float ub = map(upperb, 0, 255, 0, 400);

  stroke(255, 0, 0); fill(255, 0, 0);
  strokeWeight(2);
  line(lb + 10, height-30, ub +10, height-30);
  ellipse(lb+10, height-30, 3, 3 );
  text(lowerb, lb-10, height-15);
  ellipse(ub+10, height-30, 3, 3 );
  text(upperb, ub+10, height-15);
}

void mouseMoved() {
  if (keyPressed) {
    // With a key held, movement adjusts only the upper bound.
    upperb += mouseX - pmouseX;
  }
  else {
    // Otherwise slide the whole window, stopping at the 0/255 edges.
    if (upperb < 255 || (mouseX - pmouseX) < 0) {
      lowerb += mouseX - pmouseX;
    }

    if (lowerb > 0 || (mouseX - pmouseX) > 0) {
      upperb += mouseX - pmouseX;
    }
  }
  // Keep the bounds ordered and inside [0, 255].
  upperb = constrain(upperb, lowerb, 255);
  lowerb = constrain(lowerb, 0, upperb-1);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 268 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 168 KiB

View file

@ -0,0 +1,38 @@
import gab.opencv.*;

// Show the grayscale difference between two photos of the same scene.
OpenCV opencv;
PImage before, after, grayDiff;
//PImage colorDiff;

void setup() {
  before = loadImage("before.jpg");
  after = loadImage("after.jpg");
  size(before.width, before.height);

  opencv = new OpenCV(this, before);
  // diff() compares the working image against the given one.
  opencv.diff(after);
  grayDiff = opencv.getSnapshot();

  // Color-diff variant, kept for reference:
  //  opencv.useColor();
  //  opencv.loadImage(after);
  //  opencv.diff(after);
  //  colorDiff = opencv.getSnapshot();
}

void draw() {
  pushMatrix();
  scale(0.5); // three images into a half-scale grid
  image(before, 0, 0);
  image(after, before.width, 0);
  //  image(colorDiff, 0, before.height);
  image(grayDiff, before.width, before.height);
  popMatrix();

  fill(255);
  text("before", 10, 20);
  text("after", before.width/2 +10, 20);
  text("gray diff", before.width/2 + 10, before.height/2+ 20);
  //  text("color diff", 10, before.height/2+ 20);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 128 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 132 KiB

View file

@ -0,0 +1,297 @@
/**
 * Image Filtering
 * This sketch performs some image filtering (threshold, blur) and contour detection
 *
 * @author: Jordi Tost (@jorditost)
 * @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFiltering
 *
 * University of Applied Sciences Potsdam, 2014
 *
 * It requires the ControlP5 Processing library:
 * http://www.sojamo.de/libraries/controlP5/
 */
import gab.opencv.*;
import java.awt.Rectangle;
import processing.video.*;
import controlP5.*;

OpenCV opencv;
Capture video;
// Snapshots of each pipeline stage, for the 2x2 display grid.
PImage src, preProcessedImage, processedImage, contoursImage;
ArrayList<Contour> contours;

// Filter parameters, adjustable at runtime via the ControlP5 sliders.
float contrast = 1.35;
int brightness = 0;
int threshold = 75;
boolean useAdaptiveThreshold = false; // use basic thresholding
int thresholdBlockSize = 489;
int thresholdConstant = 45;
// Minimum bounding-box side for a contour to be drawn as a blob.
int blobSizeThreshold = 20;
int blurSize = 4;

// Control vars
ControlP5 cp5;
int buttonColor;
int buttonBgColor;

// Open the camera, set up OpenCV, and build the ControlP5 UI.
void setup() {
  frameRate(15);

  video = new Capture(this, 640, 480);
  video.start();

  opencv = new OpenCV(this, 640, 480);
  contours = new ArrayList<Contour>();

  // Extra 200px column on the left for the sliders.
  size(opencv.width + 200, opencv.height, P2D);

  // Init Controls
  cp5 = new ControlP5(this);
  initControls();

  // Set thresholding
  toggleAdaptiveThreshold(useAdaptiveThreshold);
}
// Per-frame pipeline: grab camera frame -> pre-process -> threshold ->
// clean up -> find contours -> display everything.
void draw() {

  // Read last captured frame
  if (video.available()) {
    video.read();
  }

  // Load the new frame of our camera in to OpenCV
  opencv.loadImage(video);
  src = opencv.getSnapshot();

  ///////////////////////////////
  // <1> PRE-PROCESS IMAGE
  // - Grey channel
  // - Brightness / Contrast
  ///////////////////////////////

  // Gray channel
  opencv.gray();

  //opencv.brightness(brightness);
  opencv.contrast(contrast);

  // Save snapshot for display
  preProcessedImage = opencv.getSnapshot();

  ///////////////////////////////
  // <2> PROCESS IMAGE
  // - Threshold
  // - Noise Suppression
  ///////////////////////////////

  // Adaptive threshold - Good when non-uniform illumination
  if (useAdaptiveThreshold) {

    // Block size must be odd and greater than 3
    if (thresholdBlockSize%2 == 0) thresholdBlockSize++;
    if (thresholdBlockSize < 3) thresholdBlockSize = 3;

    opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant);

  // Basic threshold - range [0, 255]
  } else {
    opencv.threshold(threshold);
  }

  // Invert (black bg, white blobs)
  opencv.invert();

  // Reduce noise - Dilate and erode to close holes
  opencv.dilate();
  opencv.erode();

  // Blur
  opencv.blur(blurSize);

  // Save snapshot for display
  processedImage = opencv.getSnapshot();

  ///////////////////////////////
  // <3> FIND CONTOURS
  ///////////////////////////////

  // Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);

  // Save snapshot for display
  contoursImage = opencv.getSnapshot();

  // Draw
  pushMatrix();

  // Leave space for ControlP5 sliders
  translate(width-src.width, 0);

  // Display images
  displayImages();

  // Display contours in the lower right window
  pushMatrix();
  scale(0.5);
  translate(src.width, src.height);

  displayContours();
  displayContoursBoundingBoxes();

  popMatrix();
  popMatrix();
}
/////////////////////
// Display Methods
/////////////////////

// Draw the four pipeline stages in a half-scale 2x2 grid with labels.
void displayImages() {
  pushMatrix();
  scale(0.5);
  image(src, 0, 0);
  image(preProcessedImage, src.width, 0);
  image(processedImage, 0, src.height);
  // NOTE(review): draws src again here; the caller overlays the
  // contours/bounding boxes on this quadrant afterwards.
  image(src, src.width, src.height);
  popMatrix();

  stroke(255);
  fill(255);
  text("Source", 10, 25);
  text("Pre-processed Image", src.width/2 + 10, 25);
  text("Processed Image", 10, src.height/2 + 25);
  text("Tracked Points", src.width/2 + 10, src.height/2 + 25);
}
// Outline every detected contour as a thick green shape.
void displayContours() {
  for (Contour c : contours) {
    noFill();
    stroke(0, 255, 0);
    strokeWeight(3);
    c.draw();
  }
}
// Draw a translucent red box around each contour large enough to count
// as a blob (both bounding-box sides >= blobSizeThreshold).
void displayContoursBoundingBoxes() {
  for (int i=0; i<contours.size(); i++) {
    Contour contour = contours.get(i);
    Rectangle r = contour.getBoundingBox();

    // Skip too-small boxes; the commented-out clause would also skip
    // near-full-frame contours.
    if (//(contour.area() > 0.9 * src.width * src.height) ||
        (r.width < blobSizeThreshold || r.height < blobSizeThreshold))
      continue;

    stroke(255, 0, 0);
    fill(255, 0, 0, 150);
    strokeWeight(2);
    rect(r.x, r.y, r.width, r.height);
  }
}
//////////////////////////
// CONTROL P5 Functions
//////////////////////////

// Build the slider/toggle UI and remember the default controller colors
// (used by setLock() to restore them after a lock).
void initControls() {
  // Slider for contrast
  cp5.addSlider("contrast")
     .setLabel("contrast")
     .setPosition(20,50)
     .setRange(0.0,6.0)
     ;

  // Slider for threshold
  cp5.addSlider("threshold")
     .setLabel("threshold")
     .setPosition(20,110)
     .setRange(0,255)
     ;

  // Toggle to activate adaptive threshold
  cp5.addToggle("toggleAdaptiveThreshold")
     .setLabel("use adaptive threshold")
     .setSize(10,10)
     .setPosition(20,144)
     ;

  // Slider for adaptive threshold block size
  cp5.addSlider("thresholdBlockSize")
     .setLabel("a.t. block size")
     .setPosition(20,180)
     .setRange(1,700)
     ;

  // Slider for adaptive threshold constant
  cp5.addSlider("thresholdConstant")
     .setLabel("a.t. constant")
     .setPosition(20,200)
     .setRange(-100,100)
     ;

  // Slider for blur size
  cp5.addSlider("blurSize")
     .setLabel("blur size")
     .setPosition(20,260)
     .setRange(1,20)
     ;

  // Slider for minimum blob size
  cp5.addSlider("blobSizeThreshold")
     .setLabel("min blob size")
     .setPosition(20,290)
     .setRange(0,60)
     ;

  // Store the default foreground/background colors; setLock() needs
  // them to restore a controller's appearance when unlocking.
  buttonColor = cp5.getController("contrast").getColor().getForeground();
  buttonBgColor = cp5.getController("contrast").getColor().getBackground();
}
// Switch between basic and adaptive thresholding. Exactly one mode's
// controls are editable at a time: lock the inactive mode's sliders
// and unlock the active one's.
void toggleAdaptiveThreshold(boolean theFlag) {
  useAdaptiveThreshold = theFlag;

  // When adaptive mode is on, the basic threshold slider is locked
  // and the two adaptive sliders are unlocked — and vice versa.
  setLock(cp5.getController("threshold"), useAdaptiveThreshold);
  setLock(cp5.getController("thresholdBlockSize"), !useAdaptiveThreshold);
  setLock(cp5.getController("thresholdConstant"), !useAdaptiveThreshold);
}
// Lock or unlock a ControlP5 controller, graying it out while locked.
void setLock(Controller theController, boolean theValue) {
  theController.setLock(theValue);

  if (theValue) {
    // Locked: muted gray colors.
    theController.setColorBackground(color(150,150));
    theController.setColorForeground(color(100,100));
  } else {
    // Unlocked: restore the defaults captured in initControls().
    theController.setColorBackground(color(buttonBgColor));
    theController.setColorForeground(color(buttonColor));
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 268 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 221 KiB

View file

@ -0,0 +1,90 @@
/**
 * Blob Class
 *
 * A contour wrapped with tracking state so it can be matched frame to
 * frame: a unique id, an "available for matching" flag, and a countdown
 * timer that lets a blob survive briefly after it disappears.
 *
 * Based on this example by Daniel Shiffman:
 * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
 *
 * @author: Jordi Tost (@jorditost)
 *
 * University of Applied Sciences Potsdam, 2014
 */
class Blob {

  private PApplet parent;

  // Contour
  public Contour contour;

  // Am I available to be matched?
  public boolean available;

  // Should I be deleted?
  public boolean delete;

  // How long should I live if I have disappeared?
  private int initTimer = 5; //127;
  public int timer;

  // Unique ID for each blob
  int id;

  // Make me
  Blob(PApplet parent, int id, Contour c) {
    this.parent = parent;
    this.id = id;
    this.contour = new Contour(parent, c.pointMat);
    available = true;
    delete = false;
    timer = initTimer;
  }

  // Show me: bounding box plus id label, fading as the timer runs down.
  void display() {
    Rectangle r = contour.getBoundingBox();

    float opacity = map(timer, 0, initTimer, 0, 127);
    fill(0,0,255,opacity);
    stroke(0,0,255);
    rect(r.x, r.y, r.width, r.height);
    fill(255,2*opacity);
    textSize(26);
    text(""+id, r.x+10, r.y+30);
  }

  // Give me a new contour for this blob (shape, points, location, size)
  // Oooh, it would be nice to lerp here!
  void update(Contour newC) {
    contour = new Contour(parent, newC.pointMat);

    // Is there a way to update the contour's points without creating a new one?
    /*ArrayList<PVector> newPoints = newC.getPoints();
    Point[] inputPoints = new Point[newPoints.size()];
    for(int i = 0; i < newPoints.size(); i++){
      inputPoints[i] = new Point(newPoints.get(i).x, newPoints.get(i).y);
    }
    contour.loadPoints(inputPoints);*/

    timer = initTimer;
  }

  // Count me down, I am gone
  void countDown() {
    timer--;
  }

  // I am dead, delete me
  boolean dead() {
    // Idiom fix: return the condition directly instead of
    // if (...) return true; return false;
    return timer < 0;
  }

  public Rectangle getBoundingBox() {
    return contour.getBoundingBox();
  }
}

View file

@ -0,0 +1,455 @@
/**
 * Image Filtering
 * This sketch will help us to adjust the filter values to optimize blob detection
 *
 * Persistence algorithm by Daniel Shifmann:
 * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
 *
 * @author: Jordi Tost (@jorditost)
 * @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFilteringWithBlobPersistence
 *
 * University of Applied Sciences Potsdam, 2014
 *
 * It requires the ControlP5 Processing library:
 * http://www.sojamo.de/libraries/controlP5/
 */
import gab.opencv.*;
import java.awt.Rectangle;
import processing.video.*;
import controlP5.*;

OpenCV opencv;
Capture video;
// Snapshots of each pipeline stage, for the 2x2 display grid.
PImage src, preProcessedImage, processedImage, contoursImage;
ArrayList<Contour> contours;

// List of detected contours parsed as blobs (every frame)
ArrayList<Contour> newBlobContours;

// List of my blob objects (persistent)
ArrayList<Blob> blobList;

// Number of blobs detected over all time. Used to set IDs.
int blobCount = 0;

// Filter parameters, adjustable at runtime via the ControlP5 sliders.
float contrast = 1.35;
int brightness = 0;
int threshold = 75;
boolean useAdaptiveThreshold = false; // use basic thresholding
int thresholdBlockSize = 489;
int thresholdConstant = 45;
// Minimum bounding-box side for a contour to count as a blob.
int blobSizeThreshold = 20;
int blurSize = 4;

// Control vars
ControlP5 cp5;
int buttonColor;
int buttonBgColor;

// Open the camera, set up OpenCV, the blob list, and the ControlP5 UI.
void setup() {
  frameRate(15);

  video = new Capture(this, 640, 480);
  //video = new Capture(this, 640, 480, "USB2.0 PC CAMERA");
  video.start();

  opencv = new OpenCV(this, 640, 480);
  contours = new ArrayList<Contour>();

  // Blobs list
  blobList = new ArrayList<Blob>();

  // Extra 200px column on the left for the sliders.
  size(opencv.width + 200, opencv.height, P2D);

  // Init Controls
  cp5 = new ControlP5(this);
  initControls();

  // Set thresholding
  toggleAdaptiveThreshold(useAdaptiveThreshold);
}
// Per-frame pipeline: grab camera frame -> pre-process -> threshold ->
// clean up -> detect & track blobs -> display everything.
void draw() {

  // Read last captured frame
  if (video.available()) {
    video.read();
  }

  // Load the new frame of our camera in to OpenCV
  opencv.loadImage(video);
  src = opencv.getSnapshot();

  ///////////////////////////////
  // <1> PRE-PROCESS IMAGE
  // - Grey channel
  // - Brightness / Contrast
  ///////////////////////////////

  // Gray channel
  opencv.gray();

  //opencv.brightness(brightness);
  opencv.contrast(contrast);

  // Save snapshot for display
  preProcessedImage = opencv.getSnapshot();

  ///////////////////////////////
  // <2> PROCESS IMAGE
  // - Threshold
  // - Noise Suppression
  ///////////////////////////////

  // Adaptive threshold - Good when non-uniform illumination
  if (useAdaptiveThreshold) {

    // Block size must be odd and greater than 3
    if (thresholdBlockSize%2 == 0) thresholdBlockSize++;
    if (thresholdBlockSize < 3) thresholdBlockSize = 3;

    opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant);

  // Basic threshold - range [0, 255]
  } else {
    opencv.threshold(threshold);
  }

  // Invert (black bg, white blobs)
  opencv.invert();

  // Reduce noise - Dilate and erode to close holes
  opencv.dilate();
  opencv.erode();

  // Blur
  opencv.blur(blurSize);

  // Save snapshot for display
  processedImage = opencv.getSnapshot();

  ///////////////////////////////
  // <3> FIND CONTOURS
  ///////////////////////////////

  // Find contours and match them against the persistent blob list.
  detectBlobs();
  // Passing 'true' sorts them by descending area.
  //contours = opencv.findContours(true, true);

  // Save snapshot for display
  contoursImage = opencv.getSnapshot();

  // Draw
  pushMatrix();

  // Leave space for ControlP5 sliders
  translate(width-src.width, 0);

  // Display images
  displayImages();

  // Display contours in the lower right window
  pushMatrix();
  scale(0.5);
  translate(src.width, src.height);

  // Contours
  //displayContours();
  //displayContoursBoundingBoxes();

  // Blobs
  displayBlobs();

  popMatrix();
  popMatrix();
}
///////////////////////
// Display Functions
///////////////////////

// Draw the four pipeline stages in a half-scale 2x2 grid with labels.
void displayImages() {
  pushMatrix();
  scale(0.5);
  image(src, 0, 0);
  image(preProcessedImage, src.width, 0);
  image(processedImage, 0, src.height);
  // NOTE(review): draws src again here; the caller overlays the tracked
  // blobs on this quadrant afterwards.
  image(src, src.width, src.height);
  popMatrix();

  stroke(255);
  fill(255);
  textSize(12);
  text("Source", 10, 25);
  text("Pre-processed Image", src.width/2 + 10, 25);
  text("Processed Image", 10, src.height/2 + 25);
  text("Tracked Points", src.width/2 + 10, src.height/2 + 25);
}
// Render every persistent blob (box plus id label) with a thin stroke.
void displayBlobs() {
  for (Blob blob : blobList) {
    strokeWeight(1);
    blob.display();
  }
}
// Debug view: outline every raw contour from this frame in green.
void displayContours() {
  // Contours
  for (int i=0; i<contours.size(); i++) {
    Contour contour = contours.get(i);

    noFill();
    stroke(0, 255, 0);
    strokeWeight(3);
    contour.draw();
  }
}
// Draw a translucent red box around each contour large enough to count
// as a blob (both bounding-box sides >= blobSizeThreshold).
void displayContoursBoundingBoxes() {
  for (int i=0; i<contours.size(); i++) {
    Contour contour = contours.get(i);
    Rectangle r = contour.getBoundingBox();

    // Skip too-small boxes; the commented-out clause would also skip
    // near-full-frame contours.
    if (//(contour.area() > 0.9 * src.width * src.height) ||
        (r.width < blobSizeThreshold || r.height < blobSizeThreshold))
      continue;

    stroke(255, 0, 0);
    fill(255, 0, 0, 150);
    strokeWeight(2);
    rect(r.x, r.y, r.width, r.height);
  }
}
////////////////////
// Blob Detection
////////////////////

// Match this frame's contours against the persistent blob list,
// creating, updating, aging, and deleting Blob objects as needed.
// (Persistence algorithm by Daniel Shiffman.)
void detectBlobs() {

  // Contours detected in this frame
  // Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);
  newBlobContours = getBlobsFromContours(contours);

  // SCENARIO 1
  // blobList is empty: every contour becomes a new blob.
  if (blobList.isEmpty()) {
    for (int i = 0; i < newBlobContours.size(); i++) {
      println("+++ New blob detected with ID: " + blobCount);
      blobList.add(new Blob(this, blobCount, newBlobContours.get(i)));
      blobCount++;
    }

  // SCENARIO 2
  // Fewer (or equal) blobs than contours: match each blob to its nearest
  // unclaimed contour, then create new blobs for the leftovers.
  } else if (blobList.size() <= newBlobContours.size()) {
    boolean[] used = new boolean[newBlobContours.size()];

    for (Blob b : blobList) {
      // Find the nearest unclaimed contour, comparing bounding-box
      // top-left corners.
      float record = 50000;
      int index = -1;
      for (int i = 0; i < newBlobContours.size(); i++) {
        float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y);
        if (d < record && !used[i]) {
          record = d;
          index = i;
        }
      }
      // ROBUSTNESS FIX: if no candidate was within range, skip this blob.
      // The original indexed used[-1] here and would throw.
      if (index != -1) {
        used[index] = true;
        b.update(newBlobContours.get(index));
      }
    }

    // Any contour not claimed by an existing blob becomes a new one.
    for (int i = 0; i < newBlobContours.size(); i++) {
      if (!used[i]) {
        println("+++ New blob detected with ID: " + blobCount);
        blobList.add(new Blob(this, blobCount, newBlobContours.get(i)));
        blobCount++;
      }
    }

  // SCENARIO 3
  // More blobs than contours: match each contour to its nearest available
  // blob; blobs left unmatched start counting down toward deletion.
  } else {
    // All Blob objects start out as available
    for (Blob b : blobList) {
      b.available = true;
    }

    for (int i = 0; i < newBlobContours.size(); i++) {
      // Find the nearest still-available blob for this contour.
      float record = 50000;
      int index = -1;
      for (int j = 0; j < blobList.size(); j++) {
        Blob b = blobList.get(j);
        float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y);
        if (d < record && b.available) {
          record = d;
          index = j;
        }
      }
      // ROBUSTNESS FIX: guard against no available blob in range
      // (the original called blobList.get(-1) in that case).
      if (index != -1) {
        Blob b = blobList.get(index);
        b.available = false;
        b.update(newBlobContours.get(i));
      }
    }

    // Age unmatched blobs; mark the expired ones for deletion.
    for (Blob b : blobList) {
      if (b.available) {
        b.countDown();
        if (b.dead()) {
          b.delete = true;
        }
      }
    }
  }

  // Delete any blob that should be deleted — iterate backwards so
  // removal doesn't shift the indices we haven't visited yet.
  for (int i = blobList.size()-1; i >= 0; i--) {
    Blob b = blobList.get(i);
    if (b.delete) {
      blobList.remove(i);
    }
  }
}
// Filter a contour list down to those large enough to be blobs
// (bounding box at least blobSizeThreshold on each side).
ArrayList<Contour> getBlobsFromContours(ArrayList<Contour> newContours) {
  ArrayList<Contour> newBlobs = new ArrayList<Contour>();
  for (Contour candidate : newContours) {
    Rectangle box = candidate.getBoundingBox();
    if (box.width >= blobSizeThreshold && box.height >= blobSizeThreshold) {
      newBlobs.add(candidate);
    }
  }
  return newBlobs;
}
//////////////////////////
// CONTROL P5 Functions
//////////////////////////
// Build the ControlP5 UI: sliders for every image-processing parameter
// plus a toggle for adaptive thresholding.
void initControls() {
  // Slider for contrast
  cp5.addSlider("contrast")
  .setLabel("contrast")
  .setPosition(20,50)
  .setRange(0.0,6.0)
  ;
  // Slider for threshold
  cp5.addSlider("threshold")
  .setLabel("threshold")
  .setPosition(20,110)
  .setRange(0,255)
  ;
  // Toggle to activate adaptive threshold
  cp5.addToggle("toggleAdaptiveThreshold")
  .setLabel("use adaptive threshold")
  .setSize(10,10)
  .setPosition(20,144)
  ;
  // Slider for adaptive threshold block size
  cp5.addSlider("thresholdBlockSize")
  .setLabel("a.t. block size")
  .setPosition(20,180)
  .setRange(1,700)
  ;
  // Slider for adaptive threshold constant
  cp5.addSlider("thresholdConstant")
  .setLabel("a.t. constant")
  .setPosition(20,200)
  .setRange(-100,100)
  ;
  // Slider for blur size
  cp5.addSlider("blurSize")
  .setLabel("blur size")
  .setPosition(20,260)
  .setRange(1,20)
  ;
  // Slider for minimum blob size
  cp5.addSlider("blobSizeThreshold")
  .setLabel("min blob size")
  .setPosition(20,290)
  .setRange(0,60)
  ;
  // Store the default foreground/background colors so setLock()
  // can restore them when a control is unlocked.
  buttonColor = cp5.getController("contrast").getColor().getForeground();
  buttonBgColor = cp5.getController("contrast").getColor().getBackground();
}
// ControlP5 callback for the adaptive-threshold toggle.
// When adaptive thresholding is on, the basic threshold slider is locked
// and the two adaptive sliders are unlocked; otherwise the reverse.
void toggleAdaptiveThreshold(boolean theFlag) {
  useAdaptiveThreshold = theFlag;
  setLock(cp5.getController("threshold"), useAdaptiveThreshold);
  setLock(cp5.getController("thresholdBlockSize"), !useAdaptiveThreshold);
  setLock(cp5.getController("thresholdConstant"), !useAdaptiveThreshold);
}
// Lock/unlock a ControlP5 controller, dimming it while locked and
// restoring the stored default colors when unlocked.
void setLock(Controller theController, boolean theValue) {
  theController.setLock(theValue);
  if (!theValue) {
    theController.setColorBackground(color(buttonBgColor));
    theController.setColorForeground(color(buttonColor));
  } else {
    theController.setColorBackground(color(150,150));
    theController.setColorForeground(color(100,100));
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 221 KiB

View file

@ -0,0 +1,38 @@
import gab.opencv.*;
import processing.video.*;
import java.awt.*;
Capture video;
OpenCV opencv;
void setup() {
  // Window is full size; capture and detection run at half resolution
  // and draw() scales the result up by 2.
  size(640, 480);
  video = new Capture(this, 640/2, 480/2);
  opencv = new OpenCV(this, 640/2, 480/2);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  video.start();
}
// Detect faces in the current half-resolution frame and draw a green
// rectangle around each, scaled up 2x to fill the window.
void draw() {
  scale(2);
  opencv.loadImage(video);
  image(video, 0, 0 );
  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  Rectangle[] faces = opencv.detect();
  println(faces.length);
  for (Rectangle face : faces) {
    println(face.x + "," + face.y);
    rect(face.x, face.y, face.width, face.height);
  }
}
// Called by the video library whenever a new camera frame is available.
void captureEvent(Capture c) {
  c.read();
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,13 @@
import gab.opencv.*;
OpenCV opencv;
void setup() {
  // Load the image into OpenCV and size the window to match it.
  opencv = new OpenCV(this, "test.jpg");
  size(opencv.width, opencv.height);
}
void draw() {
  // Display OpenCV's current output image.
  image(opencv.getOutput(), 0, 0);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,43 @@
/*
 Luma is a better measure of perceived brightness than
 the traditional grayscale created by averaging R, G, and B channels.
 This sketch demonstrates converting an image to LAB color space
 and accessing the Luma channel for comparison with the more common
 grayscale version. Uses the un-wrapped OpenCV cvtColor() function.
 */
import gab.opencv.*;
// Import the OpenCV Improc class,
// it has the cvtColor() function we need.
import org.opencv.imgproc.Imgproc;
OpenCV opencv;
PImage colorImage, grayImage;
void setup() {
  colorImage = loadImage("flashlight.jpg");
  opencv = new OpenCV(this, colorImage);
  size(opencv.width, opencv.height);
  // Save the gray image so we can compare it to Luma
  grayImage = opencv.getSnapshot();
  // Use the built-in OpenCV function to convert the color image from BGR to LAB color space.
  Imgproc.cvtColor(opencv.getColor(), opencv.getColor(), Imgproc.COLOR_BGR2Lab);
  // Since the channels start out in the order BGRA,
  // converting to LAB will put the Luma in the B channel
  opencv.setGray(opencv.getB());
}
// Layout: everything at half scale — color image centered on top,
// the averaged gray and the Luma channel side by side underneath.
void draw() {
  background(0);
  int w = colorImage.width;
  int h = colorImage.height;
  pushMatrix();
  scale(0.5);
  image(colorImage, w/2, 0);
  image(grayImage, 0, h);
  image(opencv.getOutput(), w, h);
  popMatrix();
  fill(255);
  text("GRAY", 30, height - 25);
  text("LUMA", width/2 + 30, height - 25);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

View file

@ -0,0 +1,212 @@
import gab.opencv.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.CvType;
import org.opencv.core.Point;
import org.opencv.core.Size;
//import java.util.list;
OpenCV opencv;
PImage src, dst, markerImg;
ArrayList<MatOfPoint> contours;
ArrayList<MatOfPoint2f> approximations;
ArrayList<MatOfPoint2f> markers;
boolean[][] markerCells;
void setup() {
  opencv = new OpenCV(this, "marker_test.jpg");
  // Left half shows the source at half scale; right half the unwarped marker.
  size(opencv.width, opencv.height/2);
  src = opencv.getInput();
  // hold on to this for later, since adaptiveThreshold is destructive
  Mat gray = OpenCV.imitate(opencv.getGray());
  opencv.getGray().copyTo(gray);
  Mat thresholdMat = OpenCV.imitate(opencv.getGray());
  opencv.blur(5);
  // Inverted adaptive threshold with a large (451px) Gaussian block and
  // negative constant, to pull out the dark marker border.
  Imgproc.adaptiveThreshold(opencv.getGray(), thresholdMat, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV, 451, -65);
  contours = new ArrayList<MatOfPoint>();
  Imgproc.findContours(thresholdMat, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);
  // Reduce contours to polygons, then keep only marker-like quadrilaterals.
  approximations = createPolygonApproximations(contours);
  markers = new ArrayList<MatOfPoint2f>();
  markers = selectMarkers(approximations);
  //// Mat markerMat = grat.submat();
  // Mat warped = OpenCVPro.imitate(gray);
  //
  // Unwarp the first detected marker onto a 350x350 canonical square.
  MatOfPoint2f canonicalMarker = new MatOfPoint2f();
  Point[] canonicalPoints = new Point[4];
  canonicalPoints[0] = new Point(0, 350);
  canonicalPoints[1] = new Point(0, 0);
  canonicalPoints[2] = new Point(350, 0);
  canonicalPoints[3] = new Point(350, 350);
  canonicalMarker.fromArray(canonicalPoints);
  println("num points: " + markers.get(0).height());
  Mat transform = Imgproc.getPerspectiveTransform(markers.get(0), canonicalMarker);
  Mat unWarpedMarker = new Mat(50, 50, CvType.CV_8UC1);
  Imgproc.warpPerspective(gray, unWarpedMarker, transform, new Size(350, 350));
  // THRESH_OTSU picks the binarization threshold automatically; 125 is ignored.
  Imgproc.threshold(unWarpedMarker, unWarpedMarker, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
  // Read the 7x7 grid: a cell is "on" when more than half its pixels are white.
  float cellSize = 350/7.0;
  markerCells = new boolean[7][7];
  for (int row = 0; row < 7; row++) {
    for (int col = 0; col < 7; col++) {
      int cellX = int(col*cellSize);
      int cellY = int(row*cellSize);
      // NOTE(review): Mat.submat takes (rowStart, rowEnd, colStart, colEnd);
      // the x-range is passed first here, which transposes the sampling
      // grid relative to (row, col) — confirm this is intended.
      Mat cell = unWarpedMarker.submat(cellX, cellX +(int)cellSize, cellY, cellY+ (int)cellSize);
      markerCells[row][col] = (Core.countNonZero(cell) > (cellSize*cellSize)/2);
    }
  }
  // Print the decoded grid to the console for debugging.
  for (int col = 0; col < 7; col++) {
    for (int row = 0; row < 7; row++) {
      if (markerCells[row][col]) {
        print(1);
      }
      else {
        print(0);
      }
    }
    println();
  }
  // Keep a PImage copy of the unwarped marker for draw().
  dst = createImage(350, 350, RGB);
  opencv.toPImage(unWarpedMarker, dst);
}
// Keep only marker-like candidates: convex quadrilaterals whose shortest
// side is at least 50 pixels (side lengths compared squared to avoid sqrt).
ArrayList<MatOfPoint2f> selectMarkers(ArrayList<MatOfPoint2f> candidates) {
  float minAllowedContourSide = 50;
  minAllowedContourSide = minAllowedContourSide * minAllowedContourSide;
  ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>();
  for (MatOfPoint2f candidate : candidates) {
    // Must be a quadrilateral.
    if (candidate.size().height != 4) {
      continue;
    }
    // Must be convex.
    if (!Imgproc.isContourConvex(new MatOfPoint(candidate.toArray()))) {
      continue;
    }
    // Find the squared length of the shortest side.
    Point[] corners = candidate.toArray();
    float shortest = src.width * src.width;
    for (int i = 0; i < corners.length; i++) {
      Point side = new Point(corners[i].x - corners[(i+1)%4].x, corners[i].y - corners[(i+1)%4].y);
      shortest = min(shortest, (float)side.dot(side));
    }
    // Reject shapes where consecutive points are too close together.
    if (shortest < minAllowedContourSide) {
      continue;
    }
    result.add(candidate);
  }
  return result;
}
// Approximate each contour as a polygon with approxPolyDP.
// Tolerance is 1% of the first contour's point count, matching the original.
ArrayList<MatOfPoint2f> createPolygonApproximations(ArrayList<MatOfPoint> cntrs) {
  ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>();
  double epsilon = cntrs.get(0).size().height * 0.01;
  println(epsilon);
  for (int i = 0; i < cntrs.size(); i++) {
    MatOfPoint2f polygon = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(cntrs.get(i).toArray()), polygon, epsilon, true);
    result.add(polygon);
  }
  return result;
}
// Render each contour as an open Processing shape.
void drawContours(ArrayList<MatOfPoint> cntrs) {
  for (MatOfPoint contour : cntrs) {
    beginShape();
    for (Point p : contour.toArray()) {
      vertex((float)p.x, (float)p.y);
    }
    endShape();
  }
}
// Render each float-precision contour as a closed Processing shape.
void drawContours2f(ArrayList<MatOfPoint2f> cntrs) {
  for (MatOfPoint2f contour : cntrs) {
    beginShape();
    for (Point p : contour.toArray()) {
      vertex((float)p.x, (float)p.y);
    }
    endShape(CLOSE);
  }
}
void draw() {
  // Left: source image at half scale with detected markers outlined in green.
  pushMatrix();
  background(125);
  scale(0.5);
  image(src, 0, 0);
  noFill();
  smooth();
  strokeWeight(5);
  stroke(0, 255, 0);
  drawContours2f(markers);
  popMatrix();
  // Right: the unwarped marker plus its decoded 7x7 cell grid.
  pushMatrix();
  translate(src.width/2, 0);
  strokeWeight(1);
  image(dst, 0, 0);
  float cellSize = dst.width/7.0;
  for (int col = 0; col < 7; col++) {
    for (int row = 0; row < 7; row++) {
      // Fill each cell white/black according to the decoded bit.
      if(markerCells[row][col]){
        fill(255);
      } else {
        fill(0);
      }
      stroke(0,255,0);
      rect(col*cellSize, row*cellSize, cellSize, cellSize);
      //line(i*cellSize, 0, i*cellSize, dst.width);
      //line(0, i*cellSize, dst.width, i*cellSize);
    }
  }
  popMatrix();
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 372 KiB

View file

@ -0,0 +1,197 @@
/**
* MultipleColorTracking
* Select 4 colors to track them separately
*
* It uses the OpenCV for Processing library by Greg Borenstein
* https://github.com/atduskgreg/opencv-processing
*
* @author: Jordi Tost (@jorditost)
* @url: https://github.com/jorditost/ImageFiltering/tree/master/MultipleColorTracking
*
* University of Applied Sciences Potsdam, 2014
*
* Instructions:
* Press one numerical key [1-4] and click on one color to track it
*/
import gab.opencv.*;
import processing.video.*;
import java.awt.Rectangle;
Capture video;
OpenCV opencv;
PImage src;
ArrayList<Contour> contours;
// <1> Set the range of Hue values for our filter
//ArrayList<Integer> colors;
int maxColors = 4;
int[] hues;
int[] colors;
int rangeWidth = 10;
PImage[] outputs;
int colorToChange = -1;
void setup() {
  video = new Capture(this, 640, 480);
  opencv = new OpenCV(this, video.width, video.height);
  contours = new ArrayList<Contour>();
  // Main image on the left; the four filtered previews in a column on the right.
  size(opencv.width + opencv.width/4 + 30, opencv.height, P2D);
  // Per-slot arrays: the picked color (for display), its hue (for
  // filtering), and the filtered output image.
  colors = new int[maxColors];
  hues = new int[maxColors];
  outputs = new PImage[maxColors];
  video.start();
}
void draw() {
  background(150);
  if (video.available()) {
    video.read();
  }
  // <2> Load the new frame of our movie in to OpenCV
  opencv.loadImage(video);
  // Tell OpenCV to use color information
  opencv.useColor();
  src = opencv.getSnapshot();
  // <3> Tell OpenCV to work in HSV color space.
  opencv.useColor(HSB);
  detectColors();
  // Show the source frame, then each slot's filtered preview and color swatch.
  image(src, 0, 0);
  for (int i=0; i<outputs.length; i++) {
    if (outputs[i] != null) {
      image(outputs[i], width-src.width/4, i*src.height/4, src.width/4, src.height/4);
      noStroke();
      fill(colors[i]);
      rect(src.width, i*src.height/4, 30, src.height/4);
    }
  }
  // Print text if new color expected
  textSize(20);
  stroke(255);
  fill(255);
  if (colorToChange > -1) {
    text("click to change color " + colorToChange, 10, 25);
  } else {
    text("press key [1-4] to select color", 10, 25);
  }
  displayContoursBoundingBoxes();
}
//////////////////////
// Detect Functions
//////////////////////
// For each configured color slot, filter the current frame by hue and
// save the result; contours are computed from the first slot's output.
void detectColors() {
  for (int i=0; i<hues.length; i++) {
    // Slot not set yet (hues default to 0), so skip it.
    // NOTE(review): this also skips a legitimately picked hue of exactly 0 — confirm acceptable.
    if (hues[i] <= 0) continue;
    opencv.loadImage(src);
    opencv.useColor(HSB);
    // <4> Copy the Hue channel of our image into
    // the gray channel, which we process.
    opencv.setGray(opencv.getH().clone());
    int hueToDetect = hues[i];
    //println("index " + i + " - hue to detect: " + hueToDetect);
    // <5> Filter the image based on the range of
    // hue values that match the object we want to track.
    opencv.inRange(hueToDetect-rangeWidth/2, hueToDetect+rangeWidth/2);
    //opencv.dilate();
    opencv.erode();
    // TO DO:
    // Add here some image filtering to detect blobs better
    // <6> Save the processed image for reference.
    outputs[i] = opencv.getSnapshot();
  }
  // <7> Find contours in our range image.
  // Passing 'true' sorts them by descending area.
  // NOTE(review): contours come from slot 0 only; other slots get previews but no boxes.
  if (outputs[0] != null) {
    opencv.loadImage(outputs[0]);
    contours = opencv.findContours(true,true);
  }
}
// Draw a translucent red box around every contour at least 20px on each side.
void displayContoursBoundingBoxes() {
  for (Contour c : contours) {
    Rectangle box = c.getBoundingBox();
    if (box.width < 20 || box.height < 20) {
      continue;
    }
    stroke(255, 0, 0);
    fill(255, 0, 0, 150);
    strokeWeight(2);
    rect(box.x, box.y, box.width, box.height);
  }
}
//////////////////////
// Keyboard / Mouse
//////////////////////
// While a slot is armed (keys 1-4 held), a click samples the pixel under
// the mouse and stores its color and hue for that tracking slot.
void mousePressed() {
  if (colorToChange > -1) {
    color c = get(mouseX, mouseY);
    println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c));
    // Processing reports hue in 0-255; rescale to OpenCV's 0-180 range.
    int hue = int(map(hue(c), 0, 255, 0, 180));
    colors[colorToChange-1] = c;
    hues[colorToChange-1] = hue;
    println("color index " + (colorToChange-1) + ", value: " + hue);
  }
}
// Keys 1-4 arm the corresponding tracking slot for the next mouse click.
void keyPressed() {
  switch (key) {
    case '1': colorToChange = 1; break;
    case '2': colorToChange = 2; break;
    case '3': colorToChange = 3; break;
    case '4': colorToChange = 4; break;
  }
}
// Releasing any key disarms slot selection.
void keyReleased() {
  colorToChange = -1;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 606 KiB

View file

@ -0,0 +1,2 @@
mode.id=processing.mode.java.JavaMode
mode=Java

View file

@ -0,0 +1,36 @@
import gab.opencv.*;
import processing.video.*;
OpenCV opencv;
Movie video;
void setup() {
  // Video on the left half, optical-flow visualization on the right.
  size(568*2, 320);
  video = new Movie(this, "sample1.mov");
  opencv = new OpenCV(this, 568, 320);
  video.loop();
  video.play();
}
void draw() {
  background(0);
  opencv.loadImage(video);
  opencv.calculateOpticalFlow();
  image(video, 0, 0);
  // Shift to the right half of the window and draw the flow field in red.
  translate(video.width,0);
  stroke(255,0,0);
  opencv.drawOpticalFlow();
  // Overlay the average flow vector from the frame center, scaled up to be visible.
  PVector aveFlow = opencv.getAverageFlow();
  int flowScale = 50;
  stroke(255);
  strokeWeight(2);
  line(video.width/2, video.height/2, video.width/2 + aveFlow.x*flowScale, video.height/2 + aveFlow.y*flowScale);
}
// Called by the video library when a new movie frame is available.
void movieEvent(Movie m) {
  m.read();
}

View file

@ -0,0 +1,36 @@
import gab.opencv.*;
PImage src;
OpenCV opencv;
int roiWidth = 150;
int roiHeight = 150;
boolean useROI = true;
void setup() {
  src = loadImage("test.jpg");
  opencv = new OpenCV(this, src);
  size(opencv.width, opencv.height);
}
void draw() {
  opencv.loadImage(src);
  // Restrict processing to a box anchored at the mouse position.
  if (useROI) {
    opencv.setROI(mouseX, mouseY, roiWidth, roiHeight);
  }
  opencv.findCannyEdges(20,75);
  image(opencv.getOutput(), 0, 0);
}
// toggle ROI on and off
// Any key toggles the region of interest on and off.
void keyPressed() {
  useROI = !useROI;
  if (useROI) {
    return; // draw() re-applies the ROI each frame.
  }
  // Clear the region so subsequent operations use the full image.
  opencv.releaseROI();
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

View file

@ -0,0 +1,76 @@
import gab.opencv.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.core.Mat;
import org.opencv.core.CvType;
OpenCV opencv;
PImage src;
PImage card;
int cardWidth = 250;
int cardHeight = 350;
Contour contour;
void setup() {
  src = loadImage("cards.png");
  // Source image on the left, the unwarped card on the right.
  size(src.width + cardWidth, src.height);
  opencv = new OpenCV(this, src);
  opencv.blur(1);
  opencv.threshold(120);
  // Take the largest contour (findContours sorted by descending area)
  // and simplify it to a polygon — assumed to be the card's four corners.
  contour = opencv.findContours(false, true).get(0).getPolygonApproximation();
  card = createImage(cardWidth, cardHeight, ARGB);
  opencv.toPImage(warpPerspective(contour.getPoints(), cardWidth, cardHeight), card);
}
// Build the perspective transform mapping the first four input points
// onto the corners of a w x h rectangle.
Mat getPerspectiveTransformation(ArrayList<PVector> inputPoints, int w, int h) {
  // Destination corners of the unwarped rectangle.
  Point[] canonicalPoints = {
    new Point(w, 0),
    new Point(0, 0),
    new Point(0, h),
    new Point(w, h)
  };
  MatOfPoint2f canonicalMarker = new MatOfPoint2f();
  canonicalMarker.fromArray(canonicalPoints);
  // Convert the four input PVectors into OpenCV points.
  Point[] sourcePoints = new Point[4];
  for (int i = 0; i < 4; i++) {
    PVector v = inputPoints.get(i);
    sourcePoints[i] = new Point(v.x, v.y);
  }
  return Imgproc.getPerspectiveTransform(new MatOfPoint2f(sourcePoints), canonicalMarker);
}
// Unwarp the quadrilateral given by inputPoints into a w x h Mat.
Mat warpPerspective(ArrayList<PVector> inputPoints, int w, int h) {
  Mat transform = getPerspectiveTransformation(inputPoints, w, h);
  // FIX: Mat takes (rows, cols) = (height, width); the original passed (w, h).
  // That was only harmless because Imgproc.warpPerspective reallocates the
  // destination to the requested Size — the corrected order avoids a
  // transposed initial allocation.
  Mat unWarpedMarker = new Mat(h, w, CvType.CV_8UC1);
  Imgproc.warpPerspective(opencv.getColor(), unWarpedMarker, transform, new Size(w, h));
  return unWarpedMarker;
}
void draw() {
  image(src, 0, 0);
  // Outline the detected card contour in green.
  noFill();
  stroke(0, 255, 0);
  strokeWeight(4);
  contour.draw();
  // Label each contour point with its index (fully transparent fill).
  fill(255, 0);
  ArrayList<PVector> points = contour.getPoints();
  for (int i = 0; i < points.size(); i++) {
    text(i, points.get(i).x, points.get(i).y);
  }
  // Draw the unwarped card to the right of the source image.
  pushMatrix();
  translate(src.width, 0);
  image(card, 0, 0);
  popMatrix();
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 846 KiB

View file

@ -0,0 +1,64 @@
/**
* Which Face Is Which
* Daniel Shiffman
* http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
*
* Modified by Jordi Tost (call the constructor specifying an ID)
* @updated: 01/10/2014
*/
/**
 * A tracked face: a bounding rectangle plus the bookkeeping needed to
 * match detections across frames and fade out faces that disappear.
 */
class Face {
  // Current bounding box.
  Rectangle r;
  // Am I available to be matched to a new detection this frame?
  boolean available;
  // Should I be deleted?
  boolean delete;
  // Frames left to live once I have disappeared (also used as display alpha).
  int timer = 127;
  // Unique ID assigned at creation.
  int id;

  // Make me
  Face(int newID, int x, int y, int w, int h) {
    r = new Rectangle(x,y,w,h);
    available = true;
    delete = false;
    id = newID;
  }

  // Draw the rectangle and ID label, fading as the timer runs down.
  void display() {
    fill(0,0,255,timer);
    stroke(0,0,255);
    rect(r.x,r.y,r.width, r.height);
    fill(255,timer*2);
    text(""+id,r.x+10,r.y+30);
  }

  // Give me a new location / size.
  // (It would be nice to lerp here for smoother motion.)
  void update(Rectangle newR) {
    r = (Rectangle) newR.clone();
  }

  // Count me down, I am gone
  void countDown() {
    timer--;
  }

  // I am dead once the timer expires; delete me.
  boolean dead() {
    return timer < 0;
  }
}

View file

@ -0,0 +1,162 @@
/**
* WhichFace
* Daniel Shiffman
* http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
*
* Modified by Jordi Tost (@jorditost) to work with the OpenCV library by Greg Borenstein:
* https://github.com/atduskgreg/opencv-processing
*
* @url: https://github.com/jorditost/BlobPersistence/
*
* University of Applied Sciences Potsdam, 2014
*/
import gab.opencv.*;
import processing.video.*;
import java.awt.*;
Capture video;
OpenCV opencv;
// List of my Face objects (persistent)
ArrayList<Face> faceList;
// List of detected faces (every frame)
Rectangle[] faces;
// Number of faces detected over all time. Used to set IDs.
int faceCount = 0;
// Scaling down the video
int scl = 2;
void setup() {
  size(640, 480);
  // Capture and detection run at 1/scl resolution; draw() scales back up.
  video = new Capture(this, width/scl, height/scl);
  opencv = new OpenCV(this, width/scl, height/scl);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  faceList = new ArrayList<Face>();
  video.start();
}
void draw() {
  scale(scl);
  opencv.loadImage(video);
  image(video, 0, 0 );
  detectFaces();
  // Draw this frame's raw detections in red.
  for (int i = 0; i < faces.length; i++) {
    noFill();
    strokeWeight(5);
    stroke(255,0,0);
    //rect(faces[i].x*scl,faces[i].y*scl,faces[i].width*scl,faces[i].height*scl);
    rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  }
  // Draw the persistent, ID-labelled faces in blue on top.
  for (Face f : faceList) {
    strokeWeight(2);
    f.display();
  }
}
// Match this frame's face rectangles against the persistent Face list,
// creating, updating, fading, and deleting Face objects as needed.
void detectFaces() {
  // Faces detected in this frame.
  faces = opencv.detect();

  // Check if the detected faces already exist, are new, or have disappeared.
  if (faceList.isEmpty()) {
    // SCENARIO 1: faceList is empty.
    // Just make a Face object for every face Rectangle.
    for (int i = 0; i < faces.length; i++) {
      println("+++ New face detected with ID: " + faceCount);
      faceList.add(new Face(faceCount, faces[i].x,faces[i].y,faces[i].width,faces[i].height));
      faceCount++;
    }
  } else if (faceList.size() <= faces.length) {
    // SCENARIO 2: fewer Face objects than face Rectangles found by OpenCV.
    boolean[] used = new boolean[faces.length];
    // Match each existing Face object with its nearest unused Rectangle;
    // mark used[index] so a rectangle can't be matched twice.
    for (Face f : faceList) {
      float record = 50000;
      int index = -1;
      for (int i = 0; i < faces.length; i++) {
        float d = dist(faces[i].x,faces[i].y,f.r.x,f.r.y);
        if (d < record && !used[i]) {
          record = d;
          index = i;
        }
      }
      // BUGFIX: index stays -1 when every unused rectangle is farther than
      // the 50000 sentinel; the original indexed blindly and could throw
      // ArrayIndexOutOfBoundsException.
      if (index != -1) {
        used[index] = true;
        f.update(faces[index]);
      }
    }
    // Add a new Face for every unmatched Rectangle.
    for (int i = 0; i < faces.length; i++) {
      if (!used[i]) {
        println("+++ New face detected with ID: " + faceCount);
        faceList.add(new Face(faceCount, faces[i].x,faces[i].y,faces[i].width,faces[i].height));
        faceCount++;
      }
    }
  } else {
    // SCENARIO 3: more Face objects than face Rectangles found.
    // All Face objects start out as available.
    for (Face f : faceList) {
      f.available = true;
    }
    // Match each Rectangle with the nearest available Face object.
    for (int i = 0; i < faces.length; i++) {
      float record = 50000;
      int index = -1;
      for (int j = 0; j < faceList.size(); j++) {
        Face f = faceList.get(j);
        float d = dist(faces[i].x,faces[i].y,f.r.x,f.r.y);
        if (d < record && f.available) {
          record = d;
          index = j;
        }
      }
      // Same guard as above: skip when no face was within the sentinel distance.
      if (index != -1) {
        Face f = faceList.get(index);
        f.available = false;
        f.update(faces[i]);
      }
    }
    // Count down any Face left unmatched; mark it for deletion once dead.
    for (Face f : faceList) {
      if (f.available) {
        f.countDown();
        if (f.dead()) {
          f.delete = true;
        }
      }
    }
  }
  // Remove faces marked for deletion (iterate backwards while removing).
  for (int i = faceList.size()-1; i >= 0; i--) {
    Face f = faceList.get(i);
    if (f.delete) {
      faceList.remove(i);
    }
  }
}
// Called by the video library whenever a new camera frame is available.
void captureEvent(Capture c) {
  c.read();
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 390 KiB

View file

@ -0,0 +1,43 @@
import gab.opencv.*;
OpenCV opencv;
PImage threshold, blur, adaptive, gray;
void setup() {
  PImage img = loadImage("test.jpg");
  size(img.width, img.height);
  // By default, OpenCV for Processing works with a gray
  // version of the source image
  opencv = new OpenCV(this, img);
  // but you can tell it explicitly to use color instead:
  opencv.useColor();
  // A lot of OpenCV operations only work on grayscale images.
  // But some do work in color, like threshold, blur, findCannyEdges, findChessboardCorners, etc.:
  opencv.threshold(75);
  threshold = opencv.getSnapshot();
  opencv.blur(30);
  blur = opencv.getSnapshot();
  // If you try an operation that does not work in color
  // it will print out an error message and leave the image unaffected
  opencv.adaptiveThreshold(591, 1);
  adaptive = opencv.getSnapshot();
  // if you convert the image to gray then you can
  // do gray-only operations
  opencv.gray();
  opencv.adaptiveThreshold(591, 1);
  gray = opencv.getSnapshot();
}
// Tile the four snapshots in a 2x2 grid at half scale.
void draw() {
  scale(0.5);
  int w = threshold.width;
  int h = threshold.height;
  image(threshold, 0, 0);
  image(blur, w, 0);
  image(adaptive, 0, h);
  image(gray, w, h);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB