Browse Source

Calibrate works?

master
jan 4 years ago
parent
commit
cc711c6343
100 changed files with 28757 additions and 63 deletions
  1. +58
    -63
      .idea/workspace.xml
  2. BIN
      lib/arduino.jar
  3. BIN
      lib/controlP5.jar
  4. +7
    -0
      lib/opencv_processing/data/README
  5. +37
    -0
      lib/opencv_processing/examples/BackgroundSubtraction/BackgroundSubtraction.pde
  6. BIN
      lib/opencv_processing/examples/BackgroundSubtraction/data/street.mov
  7. +22
    -0
      lib/opencv_processing/examples/BrightestPoint/BrightestPoint.pde
  8. BIN
      lib/opencv_processing/examples/BrightestPoint/robot_light.jpg
  9. +17
    -0
      lib/opencv_processing/examples/BrightnessContrast/BrightnessContrast.pde
  10. BIN
      lib/opencv_processing/examples/BrightnessContrast/test.jpg
  11. +25
    -0
      lib/opencv_processing/examples/CalibrationDemo/CalibrationDemo.pde
  12. BIN
      lib/opencv_processing/examples/CalibrationDemo/data/checkerboard.jpg
  13. +46
    -0
      lib/opencv_processing/examples/ColorChannels/ColorChannels.pde
  14. BIN
      lib/opencv_processing/examples/ColorChannels/green_object.png
  15. +58
    -0
      lib/opencv_processing/examples/DepthFromStereo/DepthFromStereo.pde
  16. BIN
      lib/opencv_processing/examples/DepthFromStereo/scene_l.jpg
  17. BIN
      lib/opencv_processing/examples/DepthFromStereo/scene_r.jpg
  18. +51
    -0
      lib/opencv_processing/examples/DilationAndErosion/DilationAndErosion.pde
  19. BIN
      lib/opencv_processing/examples/DilationAndErosion/line_drawing.jpg
  20. BIN
      lib/opencv_processing/examples/DilationAndErosion/pen_sketch.jpg
  21. +25
    -0
      lib/opencv_processing/examples/FaceDetection/FaceDetection.pde
  22. BIN
      lib/opencv_processing/examples/FaceDetection/data/test.jpg
  23. BIN
      lib/opencv_processing/examples/FaceDetection/data/test.png
  24. BIN
      lib/opencv_processing/examples/FaceDetection/data/testImage.png
  25. BIN
      lib/opencv_processing/examples/FaceDetection/data/transparent_test.png
  26. +40
    -0
      lib/opencv_processing/examples/FilterImages/FilterImages.pde
  27. BIN
      lib/opencv_processing/examples/FilterImages/test.jpg
  28. +42
    -0
      lib/opencv_processing/examples/FindContours/FindContours.pde
  29. BIN
      lib/opencv_processing/examples/FindContours/test.jpg
  30. +38
    -0
      lib/opencv_processing/examples/FindEdges/FindEdges.pde
  31. BIN
      lib/opencv_processing/examples/FindEdges/test.jpg
  32. +47
    -0
      lib/opencv_processing/examples/FindHistogram/FindHistogram.pde
  33. BIN
      lib/opencv_processing/examples/FindHistogram/test.jpg
  34. +107
    -0
      lib/opencv_processing/examples/HSVColorTracking/HSVColorTracking.pde
  35. BIN
      lib/opencv_processing/examples/HSVColorTracking/screenshots/hsv_color_tracking.png
  36. +61
    -0
      lib/opencv_processing/examples/HistogramSkinDetection/HistogramSkinDetection.pde
  37. BIN
      lib/opencv_processing/examples/HistogramSkinDetection/data/cb-cr.png
  38. BIN
      lib/opencv_processing/examples/HistogramSkinDetection/data/test.jpg
  39. +38
    -0
      lib/opencv_processing/examples/HoughLineDetection/HoughLineDetection.pde
  40. BIN
      lib/opencv_processing/examples/HoughLineDetection/film_scan.jpg
  41. +64
    -0
      lib/opencv_processing/examples/HueRangeSelection/HueRangeSelection.pde
  42. BIN
      lib/opencv_processing/examples/HueRangeSelection/colored_balls.jpg
  43. BIN
      lib/opencv_processing/examples/HueRangeSelection/rainbow.jpg
  44. +38
    -0
      lib/opencv_processing/examples/ImageDiff/ImageDiff.pde
  45. BIN
      lib/opencv_processing/examples/ImageDiff/after.jpg
  46. BIN
      lib/opencv_processing/examples/ImageDiff/before.jpg
  47. +297
    -0
      lib/opencv_processing/examples/ImageFiltering/ImageFiltering.pde
  48. BIN
      lib/opencv_processing/examples/ImageFiltering/screenshots/objects_basic_threshold.png
  49. BIN
      lib/opencv_processing/examples/ImageFiltering/screenshots/touch_adaptive_threshold.png
  50. +90
    -0
      lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/Blob.pde
  51. +455
    -0
      lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/ImageFilteringWithBlobPersistence.pde
  52. BIN
      lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/screenshots/blob_persistence.png
  53. +38
    -0
      lib/opencv_processing/examples/LiveCamTest/LiveCamTest.pde
  54. +26161
    -0
      lib/opencv_processing/examples/LiveCamTest/data/haarcascade_frontalface_alt.xml
  55. +13
    -0
      lib/opencv_processing/examples/LoadAndDisplayImage/LoadAndDisplayImage.pde
  56. BIN
      lib/opencv_processing/examples/LoadAndDisplayImage/data/test.jpg
  57. BIN
      lib/opencv_processing/examples/LoadAndDisplayImage/data/test.png
  58. +43
    -0
      lib/opencv_processing/examples/LumaVsGray/LumaVsGray.pde
  59. BIN
      lib/opencv_processing/examples/LumaVsGray/flashlight.jpg
  60. +212
    -0
      lib/opencv_processing/examples/MarkerDetection/MarkerDetection.pde
  61. BIN
      lib/opencv_processing/examples/MarkerDetection/marker_test.jpg
  62. +197
    -0
      lib/opencv_processing/examples/MultipleColorTracking/MultipleColorTracking.pde
  63. BIN
      lib/opencv_processing/examples/MultipleColorTracking/screenshots/multiple_color_tracking.png
  64. +2
    -0
      lib/opencv_processing/examples/MultipleColorTracking/sketch.properties
  65. +36
    -0
      lib/opencv_processing/examples/OpticalFlow/OpticalFlow.pde
  66. BIN
      lib/opencv_processing/examples/OpticalFlow/data/sample1.mov
  67. +36
    -0
      lib/opencv_processing/examples/RegionOfInterest/RegionOfInterest.pde
  68. BIN
      lib/opencv_processing/examples/RegionOfInterest/test.jpg
  69. +76
    -0
      lib/opencv_processing/examples/WarpPerspective/WarpPerspective.pde
  70. BIN
      lib/opencv_processing/examples/WarpPerspective/cards.png
  71. +64
    -0
      lib/opencv_processing/examples/WhichFace/Face.pde
  72. +162
    -0
      lib/opencv_processing/examples/WhichFace/WhichFace.pde
  73. BIN
      lib/opencv_processing/examples/WhichFace/screenshots/whichface.png
  74. +43
    -0
      lib/opencv_processing/examples/WorkingWithColorImages/WorkingWithColorImages.pde
  75. BIN
      lib/opencv_processing/examples/WorkingWithColorImages/test.jpg
  76. +11
    -0
      lib/opencv_processing/library.properties
  77. BIN
      lib/opencv_processing/library/arm7/cv2.so
  78. BIN
      lib/opencv_processing/library/arm7/libopencv_calib3d.so
  79. BIN
      lib/opencv_processing/library/arm7/libopencv_calib3d.so.2.4
  80. BIN
      lib/opencv_processing/library/arm7/libopencv_calib3d.so.2.4.5
  81. BIN
      lib/opencv_processing/library/arm7/libopencv_calib3d_pch_dephelp.a
  82. BIN
      lib/opencv_processing/library/arm7/libopencv_contrib.so
  83. BIN
      lib/opencv_processing/library/arm7/libopencv_contrib.so.2.4
  84. BIN
      lib/opencv_processing/library/arm7/libopencv_contrib.so.2.4.5
  85. BIN
      lib/opencv_processing/library/arm7/libopencv_contrib_pch_dephelp.a
  86. BIN
      lib/opencv_processing/library/arm7/libopencv_core.so
  87. BIN
      lib/opencv_processing/library/arm7/libopencv_core.so.2.4
  88. BIN
      lib/opencv_processing/library/arm7/libopencv_core.so.2.4.5
  89. BIN
      lib/opencv_processing/library/arm7/libopencv_core_pch_dephelp.a
  90. BIN
      lib/opencv_processing/library/arm7/libopencv_features2d.so
  91. BIN
      lib/opencv_processing/library/arm7/libopencv_features2d.so.2.4
  92. BIN
      lib/opencv_processing/library/arm7/libopencv_features2d.so.2.4.5
  93. BIN
      lib/opencv_processing/library/arm7/libopencv_features2d_pch_dephelp.a
  94. BIN
      lib/opencv_processing/library/arm7/libopencv_flann.so
  95. BIN
      lib/opencv_processing/library/arm7/libopencv_flann.so.2.4
  96. BIN
      lib/opencv_processing/library/arm7/libopencv_flann.so.2.4.5
  97. BIN
      lib/opencv_processing/library/arm7/libopencv_flann_pch_dephelp.a
  98. BIN
      lib/opencv_processing/library/arm7/libopencv_gpu.so
  99. BIN
      lib/opencv_processing/library/arm7/libopencv_gpu.so.2.4
  100. BIN
      lib/opencv_processing/library/arm7/libopencv_gpu.so.2.4.5

+ 58
- 63
.idea/workspace.xml View File

@@ -2,12 +2,7 @@
<project version="4">
<component name="ChangeListManager">
<list default="true" id="6e752a8c-6cb9-4ef9-9031-0329ce15fcb4" name="Default" comment="">
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/.idea/compiler.xml" afterPath="$PROJECT_DIR$/.idea/compiler.xml" />
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/.idea/workspace.xml" afterPath="$PROJECT_DIR$/.idea/workspace.xml" />
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/src/Camera.java" afterPath="$PROJECT_DIR$/src/Camera.java" />
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/src/EuglenaApplet.java" afterPath="$PROJECT_DIR$/src/EuglenaApplet.java" />
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/src/LEDControl.java" afterPath="$PROJECT_DIR$/src/LEDControl.java" />
<change type="MODIFICATION" beforePath="$PROJECT_DIR$/src/ProjectorApplet.java" afterPath="$PROJECT_DIR$/src/ProjectorApplet.java" />
</list>
<ignored path="processing-intellij.iws" />
<ignored path=".idea/workspace.xml" />
@@ -31,8 +26,8 @@
<file leaf-file-name="euglena_basic_stimuli.java" pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/src/euglena_basic_stimuli.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="759">
<caret line="85" column="29" selection-start-line="85" selection-start-column="29" selection-end-line="85" selection-end-column="29" />
<state relative-caret-position="489">
<caret line="325" column="0" selection-start-line="325" selection-start-column="0" selection-end-line="325" selection-end-column="0" />
<folding>
<element signature="e#0#16478#0" expanded="true" />
<element signature="imports" expanded="true" />
@@ -55,8 +50,8 @@
<file leaf-file-name="EuglenaApplet.java" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/src/EuglenaApplet.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="540">
<caret line="41" column="13" selection-start-line="41" selection-start-column="13" selection-end-line="41" selection-end-column="13" />
<state relative-caret-position="504">
<caret line="39" column="5" selection-start-line="39" selection-start-column="5" selection-end-line="39" selection-end-column="5" />
<folding>
<element signature="e#349#350#0" expanded="true" />
<element signature="e#394#395#0" expanded="true" />
@@ -70,8 +65,8 @@
<file leaf-file-name="ProjectorApplet.java" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/src/ProjectorApplet.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="630">
<caret line="35" column="0" selection-start-line="35" selection-start-column="0" selection-end-line="35" selection-end-column="0" />
<state relative-caret-position="594">
<caret line="33" column="18" selection-start-line="33" selection-start-column="8" selection-end-line="33" selection-end-column="18" />
<folding>
<element signature="e#394#395#0" expanded="true" />
<element signature="e#453#454#0" expanded="true" />
@@ -91,18 +86,18 @@
<file leaf-file-name="LEDControl.java" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/src/LEDControl.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="576">
<caret line="35" column="33" selection-start-line="35" selection-start-column="24" selection-end-line="35" selection-end-column="33" />
<state relative-caret-position="378">
<caret line="22" column="0" selection-start-line="22" selection-start-column="0" selection-end-line="22" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</file>
<file leaf-file-name="Camera.java" pinned="false" current-in-tab="true">
<file leaf-file-name="Camera.java" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/src/Camera.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="144">
<caret line="8" column="6" selection-start-line="8" selection-start-column="6" selection-end-line="8" selection-end-column="6" />
<state relative-caret-position="396">
<caret line="22" column="49" selection-start-line="22" selection-start-column="49" selection-end-line="22" selection-end-column="49" />
<folding>
<element signature="imports" expanded="true" />
<element signature="e#1015#1016#0" expanded="true" />
@@ -115,8 +110,8 @@
<file leaf-file-name="Menu.java" pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/src/Menu.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="198">
<caret line="20" column="63" selection-start-line="20" selection-start-column="63" selection-end-line="20" selection-end-column="63" />
<state relative-caret-position="342">
<caret line="188" column="2" selection-start-line="188" selection-start-column="2" selection-end-line="188" selection-end-column="2" />
<folding>
<element signature="imports" expanded="true" />
</folding>
@@ -124,13 +119,13 @@
</provider>
</entry>
</file>
<file leaf-file-name="Calibrator.java" pinned="false" current-in-tab="false">
<file leaf-file-name="Calibrator.java" pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/src/Calibrator.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="318">
<caret line="38" column="0" selection-start-line="38" selection-start-column="0" selection-end-line="38" selection-end-column="0" />
<state relative-caret-position="18">
<caret line="28" column="20" selection-start-line="28" selection-start-column="20" selection-end-line="28" selection-end-column="20" />
<folding>
<element signature="e#0#3795#0" expanded="true" />
<element signature="e#0#3796#0" expanded="true" />
</folding>
</state>
</provider>
@@ -167,10 +162,10 @@
<option value="$PROJECT_DIR$/src/EllipseClass.java" />
<option value="$PROJECT_DIR$/src/RectangleClass.java" />
<option value="$PROJECT_DIR$/src/EuglenaApplet.java" />
<option value="$PROJECT_DIR$/src/Calibrator.java" />
<option value="$PROJECT_DIR$/src/LEDControl.java" />
<option value="$PROJECT_DIR$/lib/core.jar!/processing/core/PApplet.class" />
<option value="$PROJECT_DIR$/src/ProjectorApplet.java" />
<option value="$PROJECT_DIR$/src/LEDControl.java" />
<option value="$PROJECT_DIR$/src/Calibrator.java" />
</list>
</option>
</component>
@@ -610,17 +605,6 @@
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/euglena_basic_stimuli.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="759">
<caret line="85" column="29" selection-start-line="85" selection-start-column="29" selection-end-line="85" selection-end-column="29" />
<folding>
<element signature="e#0#16478#0" expanded="true" />
<element signature="imports" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="jar://$PROJECT_DIR$/lib/core.jar!/processing/core/PApplet.class">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="94216">
@@ -629,30 +613,10 @@
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/Calibrator.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="318">
<caret line="38" column="0" selection-start-line="38" selection-start-column="0" selection-end-line="38" selection-end-column="0" />
<folding>
<element signature="e#0#3795#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/Menu.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="198">
<caret line="20" column="63" selection-start-line="20" selection-start-column="63" selection-end-line="20" selection-end-column="63" />
<folding>
<element signature="imports" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/EuglenaApplet.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="540">
<caret line="41" column="13" selection-start-line="41" selection-start-column="13" selection-end-line="41" selection-end-column="13" />
<state relative-caret-position="504">
<caret line="39" column="5" selection-start-line="39" selection-start-column="5" selection-end-line="39" selection-end-column="5" />
<folding>
<element signature="e#349#350#0" expanded="true" />
<element signature="e#394#395#0" expanded="true" />
@@ -664,8 +628,8 @@
</entry>
<entry file="file://$PROJECT_DIR$/src/ProjectorApplet.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="630">
<caret line="35" column="0" selection-start-line="35" selection-start-column="0" selection-end-line="35" selection-end-column="0" />
<state relative-caret-position="594">
<caret line="33" column="18" selection-start-line="33" selection-start-column="8" selection-end-line="33" selection-end-column="18" />
<folding>
<element signature="e#394#395#0" expanded="true" />
<element signature="e#453#454#0" expanded="true" />
@@ -681,18 +645,39 @@
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/euglena_basic_stimuli.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="489">
<caret line="325" column="0" selection-start-line="325" selection-start-column="0" selection-end-line="325" selection-end-column="0" />
<folding>
<element signature="e#0#16478#0" expanded="true" />
<element signature="imports" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/LEDControl.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="576">
<caret line="35" column="33" selection-start-line="35" selection-start-column="24" selection-end-line="35" selection-end-column="33" />
<state relative-caret-position="378">
<caret line="22" column="0" selection-start-line="22" selection-start-column="0" selection-end-line="22" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/Menu.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="342">
<caret line="188" column="2" selection-start-line="188" selection-start-column="2" selection-end-line="188" selection-end-column="2" />
<folding>
<element signature="imports" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/Camera.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="144">
<caret line="8" column="6" selection-start-line="8" selection-start-column="6" selection-end-line="8" selection-end-column="6" />
<state relative-caret-position="396">
<caret line="22" column="49" selection-start-line="22" selection-start-column="49" selection-end-line="22" selection-end-column="49" />
<folding>
<element signature="imports" expanded="true" />
<element signature="e#1015#1016#0" expanded="true" />
@@ -701,6 +686,16 @@
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/src/Calibrator.java">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="18">
<caret line="28" column="20" selection-start-line="28" selection-start-column="20" selection-end-line="28" selection-end-column="20" />
<folding>
<element signature="e#0#3796#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</component>
<component name="masterDetails">
<states>


BIN
lib/arduino.jar View File


BIN
lib/controlP5.jar View File


+ 7
- 0
lib/opencv_processing/data/README View File

@@ -0,0 +1,7 @@
the data folder:
If your library is using files like images, sound files,
any data file, etc., put them into the data folder.
When coding your library you can use processing's internal loading
functions like loadImage(), loadStrings(), etc. to load files
located inside the data folder into your library.


+ 37
- 0
lib/opencv_processing/examples/BackgroundSubtraction/BackgroundSubtraction.pde View File

@@ -0,0 +1,37 @@
import gab.opencv.*;
import processing.video.*;

Movie video;
OpenCV opencv;

void setup() {
size(720, 480);
video = new Movie(this, "street.mov");
opencv = new OpenCV(this, 720, 480);
opencv.startBackgroundSubtraction(5, 3, 0.5);
video.loop();
video.play();
}

void draw() {
image(video, 0, 0);
opencv.loadImage(video);
opencv.updateBackground();
opencv.dilate();
opencv.erode();

noFill();
stroke(255, 0, 0);
strokeWeight(3);
for (Contour contour : opencv.findContours()) {
contour.draw();
}
}

void movieEvent(Movie m) {
m.read();
}

BIN
lib/opencv_processing/examples/BackgroundSubtraction/data/street.mov View File


+ 22
- 0
lib/opencv_processing/examples/BrightestPoint/BrightestPoint.pde View File

@@ -0,0 +1,22 @@
import gab.opencv.*;

OpenCV opencv;

void setup() {
PImage src = loadImage("robot_light.jpg");
src.resize(800, 0);
size(src.width, src.height);
opencv = new OpenCV(this, src);
}

void draw() {
image(opencv.getOutput(), 0, 0);
PVector loc = opencv.max();
stroke(255, 0, 0);
strokeWeight(4);
noFill();
ellipse(loc.x, loc.y, 10, 10);
}


BIN
lib/opencv_processing/examples/BrightestPoint/robot_light.jpg View File

Before After
Width: 1620  |  Height: 1080  |  Size: 205KB

+ 17
- 0
lib/opencv_processing/examples/BrightnessContrast/BrightnessContrast.pde View File

@@ -0,0 +1,17 @@
import gab.opencv.*;

PImage img;
OpenCV opencv;

void setup(){
img = loadImage("test.jpg");
size(img.width, img.height);
opencv = new OpenCV(this, img);
}

void draw(){
opencv.loadImage(img);
opencv.brightness((int)map(mouseX, 0, width, -255, 255));
image(opencv.getOutput(),0,0);
}


BIN
lib/opencv_processing/examples/BrightnessContrast/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 25
- 0
lib/opencv_processing/examples/CalibrationDemo/CalibrationDemo.pde View File

@@ -0,0 +1,25 @@
import gab.opencv.*;

PImage src;
ArrayList<PVector> cornerPoints;
OpenCV opencv;

void setup() {
src = loadImage("checkerboard.jpg");
src.resize(500, 0);
size(src.width, src.height);

opencv = new OpenCV(this, src);
opencv.gray();
cornerPoints = opencv.findChessboardCorners(9,6);
}

void draw() {
image( opencv.getOutput(), 0, 0);
fill(255,0,0);
noStroke();
for(PVector p : cornerPoints){
ellipse(p.x, p.y, 5, 5);
}
}

BIN
lib/opencv_processing/examples/CalibrationDemo/data/checkerboard.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 115KB

+ 46
- 0
lib/opencv_processing/examples/ColorChannels/ColorChannels.pde View File

@@ -0,0 +1,46 @@
import gab.opencv.*;

OpenCV opencv;
PImage src, r, g, b, h, s, v;

int imgH, imgW;

void setup() {
src = loadImage("green_object.png");
src.resize(800,0);
opencv = new OpenCV(this, src);
size(int(opencv.width*1.5), int(opencv.height * 1.5));
imgH = src.height/2;
imgW = src.width/2;
r = opencv.getSnapshot(opencv.getR());
g = opencv.getSnapshot(opencv.getG());
b = opencv.getSnapshot(opencv.getB());
opencv.useColor(HSB);
h = opencv.getSnapshot(opencv.getH());
s = opencv.getSnapshot(opencv.getS());
v = opencv.getSnapshot(opencv.getV());
}

void draw() {
background(0);
noTint();
image(src, imgW,0, imgW, imgH);
tint(255,0,0);
image(r, 0, imgH, imgW, imgH);
tint(0,255,0);
image(g, imgW, imgH, imgW, imgH);
tint(0,0,255);
image(b, 2*imgW, imgH, imgW, imgH);
noTint();
image(h, 0, 2*imgH, imgW, imgH);
image(s, imgW, 2*imgH, imgW, imgH);
image(v, 2*imgW, 2*imgH, imgW, imgH);
}

BIN
lib/opencv_processing/examples/ColorChannels/green_object.png View File

Before After
Width: 2563  |  Height: 1438  |  Size: 3.2MB

+ 58
- 0
lib/opencv_processing/examples/DepthFromStereo/DepthFromStereo.pde View File

@@ -0,0 +1,58 @@
import gab.opencv.*;
import org.opencv.core.Mat;
import org.opencv.calib3d.StereoBM;
import org.opencv.core.CvType;
import org.opencv.calib3d.StereoSGBM;

OpenCV ocvL, ocvR;
PImage imgL, imgR, depth1, depth2;

void setup() {

imgL = loadImage("scene_l.jpg");
imgR = loadImage("scene_r.jpg");
ocvL = new OpenCV(this, imgL);

ocvR = new OpenCV(this, imgR);

size(ocvL.width * 2, ocvL.height*2);

ocvL.gray();
ocvR.gray();
Mat left = ocvL.getGray();
Mat right = ocvR.getGray();

Mat disparity = OpenCV.imitate(left);

StereoSGBM stereo = new StereoSGBM(0, 32, 3, 128, 256, 20, 16, 1, 100, 20, true);
stereo.compute(left, right, disparity );

Mat depthMat = OpenCV.imitate(left);
disparity.convertTo(depthMat, depthMat.type());

depth1 = createImage(depthMat.width(), depthMat.height(), RGB);
ocvL.toPImage(depthMat, depth1);

StereoBM stereo2 = new StereoBM();
stereo2.compute(left, right, disparity );
disparity.convertTo(depthMat, depthMat.type());


depth2 = createImage(depthMat.width(), depthMat.height(), RGB);
ocvL.toPImage(depthMat, depth2);
}

void draw() {
image(imgL, 0, 0);
image(imgR, imgL.width, 0);

image(depth1, 0, imgL.height);
image(depth2, imgL.width, imgL.height);

fill(255, 0, 0);
text("left", 10, 20);
text("right", 10 + imgL.width, 20);
text("stereo SGBM", 10, imgL.height + 20);
text("stereo BM", 10 + imgL.width, imgL.height+ 20);
}


BIN
lib/opencv_processing/examples/DepthFromStereo/scene_l.jpg View File

Before After
Width: 384  |  Height: 288  |  Size: 70KB

BIN
lib/opencv_processing/examples/DepthFromStereo/scene_r.jpg View File

Before After
Width: 384  |  Height: 288  |  Size: 70KB

+ 51
- 0
lib/opencv_processing/examples/DilationAndErosion/DilationAndErosion.pde View File

@@ -0,0 +1,51 @@
import gab.opencv.*;

PImage src, dilated, eroded, both;
OpenCV opencv;

void setup() {
src = loadImage("pen_sketch.jpg");
src.resize(src.width/2, 0);
size(src.width*2, src.height*2);

opencv = new OpenCV(this, src);

// Dilate and Erode both need a binary image
// So, we'll make it gray and threshold it.
opencv.gray();
opencv.threshold(100);
// We'll also invert so that erosion eats away the lines
// and dilation expands them (rather than vice-versa)
opencv.invert();
// save a snapshot to use in both operations
src = opencv.getSnapshot();

// erode and save snapshot for display
opencv.erode();
eroded = opencv.getSnapshot();

// reload un-eroded image and dilate it
opencv.loadImage(src);
opencv.dilate();
// save dilated version for display
dilated = opencv.getSnapshot();
// now erode on top of dilated version to close holes
opencv.erode();
both = opencv.getSnapshot();
noLoop();
}

void draw() {
image(src, 0, 0);
image(eroded, src.width, 0);
image(dilated, 0, src.height);
image(both, src.width, src.height);

fill(0, 255, 0);
text("original", 20, 20);
text("erode", src.width + 20, 20);
text("dilate", 20, src.height+20);
text("dilate then erode\n(close holes)", src.width+20, src.height+20);
}


BIN
lib/opencv_processing/examples/DilationAndErosion/line_drawing.jpg View File

Before After
Width: 800  |  Height: 661  |  Size: 191KB

BIN
lib/opencv_processing/examples/DilationAndErosion/pen_sketch.jpg View File

Before After
Width: 800  |  Height: 786  |  Size: 62KB

+ 25
- 0
lib/opencv_processing/examples/FaceDetection/FaceDetection.pde View File

@@ -0,0 +1,25 @@
import gab.opencv.*;
import java.awt.Rectangle;

OpenCV opencv;
Rectangle[] faces;

void setup() {
opencv = new OpenCV(this, "test.jpg");
size(opencv.width, opencv.height);

opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
faces = opencv.detect();
}

void draw() {
image(opencv.getInput(), 0, 0);

noFill();
stroke(0, 255, 0);
strokeWeight(3);
for (int i = 0; i < faces.length; i++) {
rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
}
}


BIN
lib/opencv_processing/examples/FaceDetection/data/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

BIN
lib/opencv_processing/examples/FaceDetection/data/test.png View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

BIN
lib/opencv_processing/examples/FaceDetection/data/testImage.png View File

Before After
Width: 500  |  Height: 500  |  Size: 43KB

BIN
lib/opencv_processing/examples/FaceDetection/data/transparent_test.png View File

Before After
Width: 300  |  Height: 300  |  Size: 4.5KB

+ 40
- 0
lib/opencv_processing/examples/FilterImages/FilterImages.pde View File

@@ -0,0 +1,40 @@
import gab.opencv.*;

OpenCV opencv;
PImage img, thresh, blur, adaptive;

void setup() {
img = loadImage("test.jpg");
size(img.width, img.height);

opencv = new OpenCV(this, img);
PImage gray = opencv.getSnapshot();
opencv.threshold(80);
thresh = opencv.getSnapshot();
opencv.loadImage(gray);
opencv.blur(12);
blur = opencv.getSnapshot();
opencv.loadImage(gray);
opencv.adaptiveThreshold(591, 1);
adaptive = opencv.getSnapshot();
}

void draw() {
pushMatrix();
scale(0.5);
image(img, 0, 0);
image(thresh, img.width, 0);
image(blur, 0, img.height);
image(adaptive, img.width, img.height);
popMatrix();

fill(0);
text("source", img.width/2 - 100, 20 );
text("threshold", img.width - 100, 20 );
text("blur", img.width/2 - 100, img.height/2 + 20 );
text("adaptive threshold", img.width - 150, img.height/2 + 20 );
}


BIN
lib/opencv_processing/examples/FilterImages/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 42
- 0
lib/opencv_processing/examples/FindContours/FindContours.pde View File

@@ -0,0 +1,42 @@
import gab.opencv.*;

PImage src, dst;
OpenCV opencv;

ArrayList<Contour> contours;
ArrayList<Contour> polygons;

void setup() {
src = loadImage("test.jpg");
size(src.width, src.height/2);
opencv = new OpenCV(this, src);

opencv.gray();
opencv.threshold(70);
dst = opencv.getOutput();

contours = opencv.findContours();
println("found " + contours.size() + " contours");
}

void draw() {
scale(0.5);
image(src, 0, 0);
image(dst, src.width, 0);

noFill();
strokeWeight(3);
for (Contour contour : contours) {
stroke(0, 255, 0);
contour.draw();
stroke(255, 0, 0);
beginShape();
for (PVector point : contour.getPolygonApproximation().getPoints()) {
vertex(point.x, point.y);
}
endShape();
}
}


BIN
lib/opencv_processing/examples/FindContours/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 38
- 0
lib/opencv_processing/examples/FindEdges/FindEdges.pde View File

@@ -0,0 +1,38 @@
import gab.opencv.*;

OpenCV opencv;
PImage src, canny, scharr, sobel;

void setup() {
src = loadImage("test.jpg");
size(src.width, src.height);
opencv = new OpenCV(this, src);
opencv.findCannyEdges(20,75);
canny = opencv.getSnapshot();
opencv.loadImage(src);
opencv.findScharrEdges(OpenCV.HORIZONTAL);
scharr = opencv.getSnapshot();
opencv.loadImage(src);
opencv.findSobelEdges(1,0);
sobel = opencv.getSnapshot();
}


void draw() {
pushMatrix();
scale(0.5);
image(src, 0, 0);
image(canny, src.width, 0);
image(scharr, 0, src.height);
image(sobel, src.width, src.height);
popMatrix();

text("Source", 10, 25);
text("Canny", src.width/2 + 10, 25);
text("Scharr", 10, src.height/2 + 25);
text("Sobel", src.width/2 + 10, src.height/2 + 25);
}


BIN
lib/opencv_processing/examples/FindEdges/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 47
- 0
lib/opencv_processing/examples/FindHistogram/FindHistogram.pde View File

@@ -0,0 +1,47 @@
import gab.opencv.*;

OpenCV opencv;
Histogram grayHist, rHist, gHist, bHist;

PImage img;

void setup() {
size(640, 400);
img = loadImage("test.jpg");
opencv = new OpenCV(this, img);

grayHist = opencv.findHistogram(opencv.getGray(), 256);
rHist = opencv.findHistogram(opencv.getR(), 256);
gHist = opencv.findHistogram(opencv.getG(), 256);
bHist = opencv.findHistogram(opencv.getB(), 256);
}

// Display the source image plus the four channel histograms, each framed
// by an outline rectangle in its channel color.
// The original repeated the same stroke/rect/fill/draw sequence four
// times; it is factored into drawHistogram() below.
void draw() {
  background(0);
  image(img, 10, 0, 300, 200);

  drawHistogram(grayHist, color(125), 320, 10, 310, 180);
  drawHistogram(rHist, color(255, 0, 0), 10, height - 190, 200, 180);
  drawHistogram(gHist, color(0, 255, 0), 220, height - 190, 200, 180);
  drawHistogram(bHist, color(0, 0, 255), 430, height - 190, 200, 180);
}

// Draw one histogram filled with color c inside an outlined x/y/w/h frame.
void drawHistogram(Histogram hist, color c, int x, int y, int w, int h) {
  stroke(c); noFill();
  rect(x, y, w, h);
  fill(c); noStroke();
  hist.draw(x, y, w, h);
}


BIN
lib/opencv_processing/examples/FindHistogram/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 107
- 0
lib/opencv_processing/examples/HSVColorTracking/HSVColorTracking.pde View File

@@ -0,0 +1,107 @@
/**
* HSVColorTracking
* Greg Borenstein
* https://github.com/atduskgreg/opencv-processing-book/blob/master/code/hsv_color_tracking/HSVColorTracking/HSVColorTracking.pde
*
* Modified by Jordi Tost @jorditost (color selection)
*
* University of Applied Sciences Potsdam, 2014
*/
import gab.opencv.*;
import processing.video.*;
import java.awt.Rectangle;

Capture video;
OpenCV opencv;
PImage src, colorFilteredImage;
ArrayList<Contour> contours;

// <1> Set the range of Hue values for our filter
int rangeLow = 20;
int rangeHigh = 35;

// Start the camera, create an OpenCV context of matching size, and size
// the window to fit the source and filtered images side by side.
void setup() {
  video = new Capture(this, 640, 480);
  video.start();
  opencv = new OpenCV(this, video.width, video.height);
  contours = new ArrayList<Contour>();
  size(2*opencv.width, opencv.height, P2D);
}

// Per frame: filter the camera image down to the selected hue range,
// find contours in the resulting mask, and highlight the largest one.
void draw() {
  // <1> Read last captured frame if one is ready.
  if (video.available()) {
    video.read();
  }

  // <2> Load the new frame of our movie in to OpenCV.
  opencv.loadImage(video);
  // Tell OpenCV to use color information.
  opencv.useColor();
  src = opencv.getSnapshot();
  // <3> Tell OpenCV to work in HSV color space.
  opencv.useColor(HSB);
  // <4> Copy the Hue channel of our image into
  //     the gray channel, which we process.
  opencv.setGray(opencv.getH().clone());
  // <5> Filter the image based on the range of
  //     hue values that match the object we want to track.
  opencv.inRange(rangeLow, rangeHigh);
  // <6> Get the processed image for reference.
  colorFilteredImage = opencv.getSnapshot();
  ///////////////////////////////////////////
  // We could process our image here!
  // See ImageFiltering.pde
  ///////////////////////////////////////////
  // <7> Find contours in our range image.
  //     Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);
  // <8> Display background images: source left, hue mask right.
  image(src, 0, 0);
  image(colorFilteredImage, src.width, 0);
  // <9> Check to make sure we've found any contours.
  if (contours.size() > 0) {
    // <9> Get the first contour, which will be the largest one.
    Contour biggestContour = contours.get(0);
    // <10> Find the bounding box of the largest contour,
    //      and hence our object.
    Rectangle r = biggestContour.getBoundingBox();
    // <11> Draw the bounding box of our object.
    noFill();
    strokeWeight(2);
    stroke(255, 0, 0);
    rect(r.x, r.y, r.width, r.height);
    // <12> Draw a dot in the middle of the bounding box, on the object.
    noStroke();
    fill(255, 0, 0);
    ellipse(r.x + r.width/2, r.y + r.height/2, 30, 30);
  }
}

// Pick the tracking hue from the pixel under the mouse.
// Processing's hue() returns 0-255, while OpenCV's H channel spans
// 0-180, so the value is remapped. The +/-5 tracking window is clamped
// so that a hue near 0 or 180 cannot produce an out-of-range bound
// (the original could set rangeLow negative or rangeHigh above 180,
// which silently broke tracking of reds at the hue extremes).
void mousePressed() {
  color c = get(mouseX, mouseY);
  println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c));
  int hue = int(map(hue(c), 0, 255, 0, 180));
  println("hue to detect: " + hue);
  rangeLow = constrain(hue - 5, 0, 180);
  rangeHigh = constrain(hue + 5, 0, 180);
}

BIN
lib/opencv_processing/examples/HSVColorTracking/screenshots/hsv_color_tracking.png View File

Before After
Width: 1394  |  Height: 616  |  Size: 596KB

+ 61
- 0
lib/opencv_processing/examples/HistogramSkinDetection/HistogramSkinDetection.pde View File

@@ -0,0 +1,61 @@
import gab.opencv.*;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.CvType;
import org.opencv.imgproc.Imgproc;

OpenCV opencv;
PImage src,dst, hist, histMask;

Mat skinHistogram;

// Classify every pixel of the test image as skin / not-skin using an
// elliptical skin model in the Cb/Cr chroma plane, producing a binary
// mask in dst and a visualization of the model over the Cb/Cr histogram.
void setup(){
  src = loadImage("test.jpg");
  src.resize(src.width/2, 0);
  size(src.width*2 + 256, src.height);
  // third argument is: useColor
  opencv = new OpenCV(this, src, true);

  // Skin model: a filled ellipse in the 256x256 Cb/Cr plane; any pixel
  // whose chroma falls inside the ellipse is labeled skin.
  skinHistogram = Mat.zeros(256, 256, CvType.CV_8UC1);
  Core.ellipse(skinHistogram, new Point(113.0, 155.6), new Size(40.0, 25.2), 43.0, 0.0, 360.0, new Scalar(255, 255, 255), Core.FILLED);

  histMask = createImage(256,256, ARGB);
  opencv.toPImage(skinHistogram, histMask);
  hist = loadImage("cb-cr.png");
  hist.blend(histMask, 0,0,256,256,0,0,256,256, ADD);
  dst = opencv.getOutput();
  dst.loadPixels();
  // Reuse one 1x1 Mat pair for every pixel. The original allocated two
  // new Mats per pixel inside the loop, which is needlessly slow and
  // churns native memory; setTo() overwrites the buffer each iteration.
  Mat input = new Mat(new Size(1, 1), CvType.CV_8UC3);
  Mat output = opencv.imitate(input);
  for(int i = 0; i < dst.pixels.length; i++){
    input.setTo(colorToScalar(dst.pixels[i]));
    // Convert the single pixel to YCrCb, then test its (Cr, Cb) pair
    // against the skin ellipse.
    Imgproc.cvtColor(input, output, Imgproc.COLOR_BGR2YCrCb );
    double[] inputComponents = output.get(0,0);
    if(skinHistogram.get((int)inputComponents[1], (int)inputComponents[2])[0] > 0){
      dst.pixels[i] = color(255);
    } else {
      dst.pixels[i] = color(0);
    }
  }
  dst.updatePixels();
}

// Pack a Processing color into an OpenCV Scalar, in BGR channel order
// (OpenCV's default layout).
Scalar colorToScalar(color c){
  float b = blue(c);
  float g = green(c);
  float r = red(c);
  return new Scalar(b, g, r);
}


// Source image, skin mask, and Cb/Cr histogram, laid out left to right.
void draw(){
  int x = 0;
  image(src, x, 0);
  x += src.width;
  image(dst, x, 0);
  x += src.width;
  image(hist, x, 0);
}

BIN
lib/opencv_processing/examples/HistogramSkinDetection/data/cb-cr.png View File

Before After
Width: 256  |  Height: 256  |  Size: 46KB

BIN
lib/opencv_processing/examples/HistogramSkinDetection/data/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 38
- 0
lib/opencv_processing/examples/HoughLineDetection/HoughLineDetection.pde View File

@@ -0,0 +1,38 @@
import gab.opencv.*;

OpenCV opencv;
ArrayList<Line> lines;

// Load and shrink the film scan, run Canny edge detection, then extract
// straight segments with the probabilistic Hough transform.
void setup() {
  PImage src = loadImage("film_scan.jpg");
  // Resize to 800px tall, preserving aspect ratio (width passed as 0).
  src.resize(0, 800);
  size(src.width, src.height);

  opencv = new OpenCV(this, src);
  opencv.findCannyEdges(20, 75);

  // Find lines with Hough line detection
  // Arguments are: threshold, minLineLength, maxLineGap
  lines = opencv.findLines(100, 30, 20);
}

// Draw the edge image, then highlight near-vertical segments (angle
// within 1 degree of 0) in green and near-horizontal segments (within
// 1 degree of 90) in red.
void draw() {
  image(opencv.getOutput(), 0, 0);
  strokeWeight(3);
  for (int i = 0; i < lines.size(); i++) {
    Line l = lines.get(i);
    // Line objects carry an angle in radians plus "start"/"end"
    // PVectors, so we can pick out vertical and horizontal segments.
    boolean nearZero = l.angle >= radians(0) && l.angle < radians(1);
    boolean nearNinety = l.angle > radians(89) && l.angle < radians(91);
    if (nearZero) {
      stroke(0, 255, 0);
      line(l.start.x, l.start.y, l.end.x, l.end.y);
    }
    if (nearNinety) {
      stroke(255, 0, 0);
      line(l.start.x, l.start.y, l.end.x, l.end.y);
    }
  }
}


BIN
lib/opencv_processing/examples/HoughLineDetection/film_scan.jpg View File

Before After
Width: 3432  |  Height: 3449  |  Size: 1.9MB

+ 64
- 0
lib/opencv_processing/examples/HueRangeSelection/HueRangeSelection.pde View File

@@ -0,0 +1,64 @@
import gab.opencv.*;

PImage img;
OpenCV opencv;
Histogram histogram;

int lowerb = 50;
int upperb = 100;

// Load the image and put OpenCV in HSB mode so the hue channel is
// available for range filtering in draw().
void setup() {
  img = loadImage("colored_balls.jpg");
  opencv = new OpenCV(this, img);
  size(opencv.width, opencv.height);
  opencv.useColor(HSB);
}

// Each frame: threshold the hue channel to [lowerb, upperb], show the
// resulting mask in the lower-right corner, and draw the hue histogram
// with the selected range marked in red along a 400px axis.
void draw() {
  opencv.loadImage(img);
  image(img, 0, 0);
  // Process the hue channel as the gray image, then keep only pixels
  // whose hue falls inside the selected range.
  opencv.setGray(opencv.getH().clone());
  opencv.inRange(lowerb, upperb);
  histogram = opencv.findHistogram(opencv.getH(), 255);

  // Filtered mask, quarter-size, in the bottom-right corner.
  image(opencv.getOutput(), 3*width/4, 3*height/4, width/4,height/4);

  noStroke(); fill(0);
  histogram.draw(10, height - 230, 400, 200);
  noFill(); stroke(0);
  // Hue axis under the histogram.
  line(10, height-30, 410, height-30);

  text("Hue", 10, height - (textAscent() + textDescent()));

  // Map the 0-255 hue bounds onto the 400px-wide axis.
  float lb = map(lowerb, 0, 255, 0, 400);
  float ub = map(upperb, 0, 255, 0, 400);

  // Selected range highlighted in red, with endpoint dots and labels.
  stroke(255, 0, 0); fill(255, 0, 0);
  strokeWeight(2);
  line(lb + 10, height-30, ub +10, height-30);
  ellipse(lb+10, height-30, 3, 3 );
  text(lowerb, lb-10, height-15);
  ellipse(ub+10, height-30, 3, 3 );
  text(upperb, ub+10, height-15);
}

// Adjust the hue range by mouse movement: with a key held, only the
// upper bound moves; otherwise both bounds shift together (panning the
// window). The bounds are then constrained so 0 <= lowerb < upperb <= 255.
void mouseMoved() {
  if (keyPressed) {
    upperb += mouseX - pmouseX;
  }
  else {
    // Pan the whole window. Each bound only moves if it is not already
    // pinned at its limit, or if the motion pulls it away from the limit.
    if (upperb < 255 || (mouseX - pmouseX) < 0) {
      lowerb += mouseX - pmouseX;
    }

    if (lowerb > 0 || (mouseX - pmouseX) > 0) {
      upperb += mouseX - pmouseX;
    }
  }

  // Keep the range ordered and inside [0, 255].
  upperb = constrain(upperb, lowerb, 255);
  lowerb = constrain(lowerb, 0, upperb-1);
}


BIN
lib/opencv_processing/examples/HueRangeSelection/colored_balls.jpg View File

Before After
Width: 1024  |  Height: 768  |  Size: 268KB

BIN
lib/opencv_processing/examples/HueRangeSelection/rainbow.jpg View File

Before After
Width: 1280  |  Height: 800  |  Size: 168KB

+ 38
- 0
lib/opencv_processing/examples/ImageDiff/ImageDiff.pde View File

@@ -0,0 +1,38 @@
import gab.opencv.*;

OpenCV opencv;
PImage before, after, grayDiff;
//PImage colorDiff;
// Load the two frames and compute their grayscale absolute difference.
void setup() {
  before = loadImage("before.jpg");
  after = loadImage("after.jpg");
  size(before.width, before.height);

  opencv = new OpenCV(this, before);
  // diff() compares the loaded image against the argument.
  opencv.diff(after);
  grayDiff = opencv.getSnapshot();

  // Color diff variant, kept for reference:
  // opencv.useColor();
  // opencv.loadImage(after);
  // opencv.diff(after);
  // colorDiff = opencv.getSnapshot();
}

// Show before (top-left), after (top-right), and their grayscale
// difference (bottom-right), all at half scale, with labels.
void draw() {
  int halfW = before.width/2;
  int halfH = before.height/2;

  pushMatrix();
  scale(0.5);
  image(before, 0, 0);
  image(after, before.width, 0);
  // image(colorDiff, 0, before.height);
  image(grayDiff, before.width, before.height);
  popMatrix();

  fill(255);
  text("before", 10, 20);
  text("after", halfW +10, 20);
  text("gray diff", halfW + 10, halfH+ 20);

  // text("color diff", 10, before.height/2+ 20);
}


BIN
lib/opencv_processing/examples/ImageDiff/after.jpg View File

Before After
Width: 640  |  Height: 480  |  Size: 128KB

BIN
lib/opencv_processing/examples/ImageDiff/before.jpg View File

Before After
Width: 640  |  Height: 480  |  Size: 132KB

+ 297
- 0
lib/opencv_processing/examples/ImageFiltering/ImageFiltering.pde View File

@@ -0,0 +1,297 @@
/**
* Image Filtering
* This sketch performs some image filtering (threshold, blur) and contour detection
*
* @author: Jordi Tost (@jorditost)
* @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFiltering
*
* University of Applied Sciences Potsdam, 2014
*
* It requires the ControlP5 Processing library:
* http://www.sojamo.de/libraries/controlP5/
*/
import gab.opencv.*;
import java.awt.Rectangle;
import processing.video.*;
import controlP5.*;

OpenCV opencv;
Capture video;
PImage src, preProcessedImage, processedImage, contoursImage;
ArrayList<Contour> contours;

float contrast = 1.35;
int brightness = 0;
int threshold = 75;
boolean useAdaptiveThreshold = false; // use basic thresholding
int thresholdBlockSize = 489;
int thresholdConstant = 45;
int blobSizeThreshold = 20;
int blurSize = 4;

// Control vars
ControlP5 cp5;
int buttonColor;
int buttonBgColor;

// Start the camera, create the OpenCV context, build the ControlP5 UI,
// and apply the initial thresholding mode.
void setup() {
  frameRate(15);
  video = new Capture(this, 640, 480);
  video.start();
  opencv = new OpenCV(this, 640, 480);
  contours = new ArrayList<Contour>();
  // Extra 200px on the left for the ControlP5 sliders.
  size(opencv.width + 200, opencv.height, P2D);
  // Init Controls
  cp5 = new ControlP5(this);
  initControls();
  // Set thresholding (locks/unlocks the matching sliders).
  toggleAdaptiveThreshold(useAdaptiveThreshold);
}

// Full per-frame pipeline: grab a camera frame, pre-process it
// (gray + contrast), threshold it (basic or adaptive), clean it up
// (invert, dilate/erode, blur), find contours, and display all stages.
void draw() {
  // Read last captured frame
  if (video.available()) {
    video.read();
  }
  // Load the new frame of our camera in to OpenCV
  opencv.loadImage(video);
  src = opencv.getSnapshot();
  ///////////////////////////////
  // <1> PRE-PROCESS IMAGE
  // - Grey channel
  // - Brightness / Contrast
  ///////////////////////////////
  // Gray channel
  opencv.gray();
  //opencv.brightness(brightness);
  opencv.contrast(contrast);
  // Save snapshot for display
  preProcessedImage = opencv.getSnapshot();
  ///////////////////////////////
  // <2> PROCESS IMAGE
  // - Threshold
  // - Noise Supression
  ///////////////////////////////
  // Adaptive threshold - Good when non-uniform illumination
  if (useAdaptiveThreshold) {
    // Block size must be odd and greater than 3
    if (thresholdBlockSize%2 == 0) thresholdBlockSize++;
    if (thresholdBlockSize < 3) thresholdBlockSize = 3;
    opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant);
    // Basic threshold - range [0, 255]
  } else {
    opencv.threshold(threshold);
  }

  // Invert (black bg, white blobs)
  opencv.invert();
  // Reduce noise - Dilate and erode to close holes
  opencv.dilate();
  opencv.erode();
  // Blur
  opencv.blur(blurSize);
  // Save snapshot for display
  processedImage = opencv.getSnapshot();
  ///////////////////////////////
  // <3> FIND CONTOURS
  ///////////////////////////////
  // Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);
  // Save snapshot for display
  contoursImage = opencv.getSnapshot();
  // Draw
  pushMatrix();
  // Leave space for ControlP5 sliders
  translate(width-src.width, 0);
  // Display images
  displayImages();
  // Display contours in the lower right window
  pushMatrix();
  scale(0.5);
  translate(src.width, src.height);
  displayContours();
  displayContoursBoundingBoxes();
  popMatrix();
  popMatrix();
}

/////////////////////
// Display Methods
/////////////////////

// Render the four pipeline stages in a 2x2 half-scale grid, then label
// each quadrant.
void displayImages() {
  pushMatrix();
  scale(0.5);
  image(src, 0, 0);
  image(preProcessedImage, src.width, 0);
  image(processedImage, 0, src.height);
  image(src, src.width, src.height);
  popMatrix();

  stroke(255);
  fill(255);
  int halfW = src.width/2;
  int halfH = src.height/2;
  text("Source", 10, 25);
  text("Pre-processed Image", halfW + 10, 25);
  text("Processed Image", 10, halfH + 25);
  text("Tracked Points", halfW + 10, halfH + 25);
}

// Outline every detected contour in green.
void displayContours() {
  // Drawing state is identical for all contours, so set it once.
  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  for (Contour contour : contours) {
    contour.draw();
  }
}

// Draw a translucent red box around every contour that measures at
// least blobSizeThreshold pixels in both dimensions.
void displayContoursBoundingBoxes() {
  for (Contour contour : contours) {
    Rectangle r = contour.getBoundingBox();
    boolean tooSmall = r.width < blobSizeThreshold || r.height < blobSizeThreshold;
    if (tooSmall) {
      continue;
    }
    stroke(255, 0, 0);
    fill(255, 0, 0, 150);
    strokeWeight(2);
    rect(r.x, r.y, r.width, r.height);
  }
}

//////////////////////////
// CONTROL P5 Functions
//////////////////////////

// Build all ControlP5 widgets and remember the default widget colors so
// setLock() can restore them after graying a control out.
void initControls() {
  // Slider for contrast
  cp5.addSlider("contrast")
  .setLabel("contrast")
  .setPosition(20,50)
  .setRange(0.0,6.0)
  ;
  // Slider for threshold
  cp5.addSlider("threshold")
  .setLabel("threshold")
  .setPosition(20,110)
  .setRange(0,255)
  ;
  // Toggle to activate adaptive threshold
  cp5.addToggle("toggleAdaptiveThreshold")
  .setLabel("use adaptive threshold")
  .setSize(10,10)
  .setPosition(20,144)
  ;
  // Slider for adaptive threshold block size
  cp5.addSlider("thresholdBlockSize")
  .setLabel("a.t. block size")
  .setPosition(20,180)
  .setRange(1,700)
  ;
  // Slider for adaptive threshold constant
  cp5.addSlider("thresholdConstant")
  .setLabel("a.t. constant")
  .setPosition(20,200)
  .setRange(-100,100)
  ;
  // Slider for blur size
  cp5.addSlider("blurSize")
  .setLabel("blur size")
  .setPosition(20,260)
  .setRange(1,20)
  ;
  // Slider for minimum blob size
  cp5.addSlider("blobSizeThreshold")
  .setLabel("min blob size")
  .setPosition(20,290)
  .setRange(0,60)
  ;
  // Store the default widget colors; setLock() needs them later.
  buttonColor = cp5.getController("contrast").getColor().getForeground();
  buttonBgColor = cp5.getController("contrast").getColor().getBackground();
}

// Switch between basic and adaptive thresholding, locking whichever
// sliders do not apply to the selected mode.
void toggleAdaptiveThreshold(boolean theFlag) {
  useAdaptiveThreshold = theFlag;
  // The basic-threshold slider is active exactly when adaptive mode is
  // off, and the two adaptive sliders are active exactly when it is on.
  setLock(cp5.getController("threshold"), useAdaptiveThreshold);
  setLock(cp5.getController("thresholdBlockSize"), !useAdaptiveThreshold);
  setLock(cp5.getController("thresholdConstant"), !useAdaptiveThreshold);
}

// Lock or unlock a ControlP5 widget, dimming its colors while locked
// and restoring the stored defaults when unlocked.
void setLock(Controller theController, boolean theValue) {
  theController.setLock(theValue);
  if (theValue) {
    // Dimmed colors signal that the control is inactive.
    theController.setColorBackground(color(150,150));
    theController.setColorForeground(color(100,100));
    return;
  }
  theController.setColorBackground(color(buttonBgColor));
  theController.setColorForeground(color(buttonColor));
}


BIN
lib/opencv_processing/examples/ImageFiltering/screenshots/objects_basic_threshold.png View File

Before After
Width: 954  |  Height: 616  |  Size: 268KB

BIN
lib/opencv_processing/examples/ImageFiltering/screenshots/touch_adaptive_threshold.png View File

Before After
Width: 954  |  Height: 616  |  Size: 221KB

+ 90
- 0
lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/Blob.pde View File

@@ -0,0 +1,90 @@
/**
* Blob Class
*
* Based on this example by Daniel Shiffman:
* http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
*
* @author: Jordi Tost (@jorditost)
*
* University of Applied Sciences Potsdam, 2014
*/

/**
 * Blob: a tracked contour with a persistent ID and a disappearance
 * timer, used to keep identities stable across frames.
 *
 * Based on this example by Daniel Shiffman:
 * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
 */
class Blob {
  private PApplet parent;
  // Contour currently associated with this blob
  public Contour contour;
  // Am I available to be matched to a contour this frame?
  public boolean available;
  // Should I be deleted?
  public boolean delete;
  // How many frames I survive after disappearing
  private int initTimer = 5; //127;
  public int timer;
  // Unique ID for each blob
  int id;

  Blob(PApplet parent, int id, Contour c) {
    this.parent = parent;
    this.id = id;
    this.contour = new Contour(parent, c.pointMat);
    this.available = true;
    this.delete = false;
    this.timer = initTimer;
  }

  // Draw my bounding box and ID, fading out as the timer runs down.
  void display() {
    Rectangle r = contour.getBoundingBox();
    float opacity = map(timer, 0, initTimer, 0, 127);
    fill(0,0,255,opacity);
    stroke(0,0,255);
    rect(r.x, r.y, r.width, r.height);
    fill(255,2*opacity);
    textSize(26);
    text(""+id, r.x+10, r.y+30);
  }

  // Adopt a new contour (shape, points, location, size) and reset the
  // disappearance timer.
  void update(Contour newC) {
    contour = new Contour(parent, newC.pointMat);
    timer = initTimer;
  }

  // One frame closer to removal.
  void countDown() {
    timer--;
  }

  // True once the disappearance timer has run out.
  boolean dead() {
    return timer < 0;
  }

  public Rectangle getBoundingBox() {
    return contour.getBoundingBox();
  }
}


+ 455
- 0
lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/ImageFilteringWithBlobPersistence.pde View File

@@ -0,0 +1,455 @@
/**
* Image Filtering
* This sketch will help us to adjust the filter values to optimize blob detection
*
* Persistence algorithm by Daniel Shifmann:
* http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/
*
* @author: Jordi Tost (@jorditost)
* @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFilteringWithBlobPersistence
*
* University of Applied Sciences Potsdam, 2014
*
* It requires the ControlP5 Processing library:
* http://www.sojamo.de/libraries/controlP5/
*/
import gab.opencv.*;
import java.awt.Rectangle;
import processing.video.*;
import controlP5.*;

OpenCV opencv;
Capture video;
PImage src, preProcessedImage, processedImage, contoursImage;

ArrayList<Contour> contours;

// List of detected contours parsed as blobs (every frame)
ArrayList<Contour> newBlobContours;

// List of my blob objects (persistent)
ArrayList<Blob> blobList;


// Number of blobs detected over all time. Used to set IDs.
int blobCount = 0;

float contrast = 1.35;
int brightness = 0;
int threshold = 75;
boolean useAdaptiveThreshold = false; // use basic thresholding
int thresholdBlockSize = 489;
int thresholdConstant = 45;
int blobSizeThreshold = 20;
int blurSize = 4;

// Control vars
ControlP5 cp5;
int buttonColor;
int buttonBgColor;

// Start the camera, create the OpenCV context and blob list, build the
// ControlP5 UI, and apply the initial thresholding mode.
void setup() {
  frameRate(15);
  video = new Capture(this, 640, 480);
  //video = new Capture(this, 640, 480, "USB2.0 PC CAMERA");
  video.start();
  opencv = new OpenCV(this, 640, 480);
  contours = new ArrayList<Contour>();
  // Blobs list
  blobList = new ArrayList<Blob>();
  // Extra 200px on the left for the ControlP5 sliders.
  size(opencv.width + 200, opencv.height, P2D);
  // Init Controls
  cp5 = new ControlP5(this);
  initControls();
  // Set thresholding (locks/unlocks the matching sliders).
  toggleAdaptiveThreshold(useAdaptiveThreshold);
}

// Full per-frame pipeline: grab a camera frame, pre-process it
// (gray + contrast), threshold it (basic or adaptive), clean it up
// (invert, dilate/erode, blur), run persistent blob detection, and
// display all stages.
void draw() {
  // Read last captured frame
  if (video.available()) {
    video.read();
  }
  // Load the new frame of our camera in to OpenCV
  opencv.loadImage(video);
  src = opencv.getSnapshot();
  ///////////////////////////////
  // <1> PRE-PROCESS IMAGE
  // - Grey channel
  // - Brightness / Contrast
  ///////////////////////////////
  // Gray channel
  opencv.gray();
  //opencv.brightness(brightness);
  opencv.contrast(contrast);
  // Save snapshot for display
  preProcessedImage = opencv.getSnapshot();
  ///////////////////////////////
  // <2> PROCESS IMAGE
  // - Threshold
  // - Noise Supression
  ///////////////////////////////
  // Adaptive threshold - Good when non-uniform illumination
  if (useAdaptiveThreshold) {
    // Block size must be odd and greater than 3
    if (thresholdBlockSize%2 == 0) thresholdBlockSize++;
    if (thresholdBlockSize < 3) thresholdBlockSize = 3;
    opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant);
    // Basic threshold - range [0, 255]
  } else {
    opencv.threshold(threshold);
  }

  // Invert (black bg, white blobs)
  opencv.invert();
  // Reduce noise - Dilate and erode to close holes
  opencv.dilate();
  opencv.erode();
  // Blur
  opencv.blur(blurSize);
  // Save snapshot for display
  processedImage = opencv.getSnapshot();
  ///////////////////////////////
  // <3> FIND CONTOURS
  ///////////////////////////////
  // Contour finding plus blob matching/persistence.
  detectBlobs();
  // Passing 'true' sorts them by descending area.
  //contours = opencv.findContours(true, true);
  // Save snapshot for display
  contoursImage = opencv.getSnapshot();
  // Draw
  pushMatrix();
  // Leave space for ControlP5 sliders
  translate(width-src.width, 0);
  // Display images
  displayImages();
  // Display contours in the lower right window
  pushMatrix();
  scale(0.5);
  translate(src.width, src.height);
  // Contours
  //displayContours();
  //displayContoursBoundingBoxes();
  // Blobs
  displayBlobs();
  popMatrix();
  popMatrix();
}

///////////////////////
// Display Functions
///////////////////////

// Render the four pipeline stages in a 2x2 half-scale grid, then label
// each quadrant.
void displayImages() {
  pushMatrix();
  scale(0.5);
  image(src, 0, 0);
  image(preProcessedImage, src.width, 0);
  image(processedImage, 0, src.height);
  image(src, src.width, src.height);
  popMatrix();

  stroke(255);
  fill(255);
  textSize(12);
  int halfW = src.width/2;
  int halfH = src.height/2;
  text("Source", 10, 25);
  text("Pre-processed Image", halfW + 10, 25);
  text("Processed Image", 10, halfH + 25);
  text("Tracked Points", halfW + 10, halfH + 25);
}

// Draw every tracked blob with a thin outline.
void displayBlobs() {
  // Blob.display() sets its own fill/stroke but not strokeWeight,
  // so setting it once up front is equivalent.
  strokeWeight(1);
  for (Blob b : blobList) {
    b.display();
  }
}

// Outline every detected contour in green.
void displayContours() {
  // Drawing state is identical for all contours, so set it once.
  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  for (Contour contour : contours) {
    contour.draw();
  }
}

// Draw a translucent red box around every contour that measures at
// least blobSizeThreshold pixels in both dimensions.
void displayContoursBoundingBoxes() {
  for (Contour contour : contours) {
    Rectangle r = contour.getBoundingBox();
    boolean tooSmall = r.width < blobSizeThreshold || r.height < blobSizeThreshold;
    if (tooSmall) {
      continue;
    }
    stroke(255, 0, 0);
    fill(255, 0, 0, 150);
    strokeWeight(2);
    rect(r.x, r.y, r.width, r.height);
  }
}

////////////////////
// Blob Detection
////////////////////

// Match this frame's contours against the persistent blob list:
// update matched blobs, create blobs for new contours, and count down
// (then remove) blobs that have disappeared.
//
// Fix over the original: in both greedy-matching loops the nearest-
// neighbor index could remain -1 (every candidate already taken, or all
// farther than the 50000px search radius), in which case the original
// indexed used[-1] / blobList.get(-1) and crashed. Both sites now guard
// against index == -1.
void detectBlobs() {
  // Contours detected in this frame.
  // Passing 'true' sorts them by descending area.
  contours = opencv.findContours(true, true);
  newBlobContours = getBlobsFromContours(contours);

  // SCENARIO 1
  // blobList is empty: every contour becomes a new blob.
  if (blobList.isEmpty()) {
    for (int i = 0; i < newBlobContours.size(); i++) {
      println("+++ New blob detected with ID: " + blobCount);
      blobList.add(new Blob(this, blobCount, newBlobContours.get(i)));
      blobCount++;
    }

  // SCENARIO 2
  // At least as many contours as blobs: greedily match each existing
  // blob to its nearest unused contour, then create blobs for leftovers.
  } else if (blobList.size() <= newBlobContours.size()) {
    boolean[] used = new boolean[newBlobContours.size()];
    for (Blob b : blobList) {
      // Find the unused contour closest to blob b.
      float record = 50000;
      int index = -1;
      for (int i = 0; i < newBlobContours.size(); i++) {
        float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y);
        if (d < record && !used[i]) {
          record = d;
          index = i;
        }
      }
      // Guard: no candidate found -> leave this blob unmatched instead
      // of indexing with -1.
      if (index != -1) {
        used[index] = true;
        b.update(newBlobContours.get(index));
      }
    }
    // Any contour that no blob claimed becomes a new blob.
    for (int i = 0; i < newBlobContours.size(); i++) {
      if (!used[i]) {
        println("+++ New blob detected with ID: " + blobCount);
        blobList.add(new Blob(this, blobCount, newBlobContours.get(i)));
        blobCount++;
      }
    }

  // SCENARIO 3
  // More blobs than contours: match each contour to its nearest
  // available blob; unmatched blobs count down toward deletion.
  } else {
    // All Blob objects start out as available.
    for (Blob b : blobList) {
      b.available = true;
    }
    for (int i = 0; i < newBlobContours.size(); i++) {
      // Find the available blob closest to this contour.
      float record = 50000;
      int index = -1;
      for (int j = 0; j < blobList.size(); j++) {
        Blob b = blobList.get(j);
        float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y);
        if (d < record && b.available) {
          record = d;
          index = j;
        }
      }
      // Guard: no available blob within range -> skip this contour
      // instead of calling blobList.get(-1).
      if (index != -1) {
        Blob b = blobList.get(index);
        b.available = false;
        b.update(newBlobContours.get(i));
      }
    }
    // Start to kill any left over Blob objects.
    for (Blob b : blobList) {
      if (b.available) {
        b.countDown();
        if (b.dead()) {
          b.delete = true;
        }
      }
    }
  }

  // Remove flagged blobs; iterate backwards so removal is index-safe.
  for (int i = blobList.size()-1; i >= 0; i--) {
    Blob b = blobList.get(i);
    if (b.delete) {
      blobList.remove(i);
    }
  }
}

// Keep only contours whose bounding box measures at least
// blobSizeThreshold pixels in both dimensions.
ArrayList<Contour> getBlobsFromContours(ArrayList<Contour> newContours) {
  ArrayList<Contour> newBlobs = new ArrayList<Contour>();
  for (Contour contour : newContours) {
    Rectangle r = contour.getBoundingBox();
    boolean bigEnough = r.width >= blobSizeThreshold && r.height >= blobSizeThreshold;
    if (bigEnough) {
      newBlobs.add(contour);
    }
  }
  return newBlobs;
}

//////////////////////////
// CONTROL P5 Functions
//////////////////////////

// Build all ControlP5 widgets and remember the default widget colors so
// setLock() can restore them after graying a control out.
void initControls() {
  // Slider for contrast
  cp5.addSlider("contrast")
  .setLabel("contrast")
  .setPosition(20,50)
  .setRange(0.0,6.0)
  ;
  // Slider for threshold
  cp5.addSlider("threshold")
  .setLabel("threshold")
  .setPosition(20,110)
  .setRange(0,255)
  ;
  // Toggle to activate adaptive threshold
  cp5.addToggle("toggleAdaptiveThreshold")
  .setLabel("use adaptive threshold")
  .setSize(10,10)
  .setPosition(20,144)
  ;
  // Slider for adaptive threshold block size
  cp5.addSlider("thresholdBlockSize")
  .setLabel("a.t. block size")
  .setPosition(20,180)
  .setRange(1,700)
  ;
  // Slider for adaptive threshold constant
  cp5.addSlider("thresholdConstant")
  .setLabel("a.t. constant")
  .setPosition(20,200)
  .setRange(-100,100)
  ;
  // Slider for blur size
  cp5.addSlider("blurSize")
  .setLabel("blur size")
  .setPosition(20,260)
  .setRange(1,20)
  ;
  // Slider for minimum blob size
  cp5.addSlider("blobSizeThreshold")
  .setLabel("min blob size")
  .setPosition(20,290)
  .setRange(0,60)
  ;
  // Store the default widget colors; setLock() needs them later.
  buttonColor = cp5.getController("contrast").getColor().getForeground();
  buttonBgColor = cp5.getController("contrast").getColor().getBackground();
}

// Switch between basic and adaptive thresholding, locking whichever
// sliders do not apply to the selected mode.
void toggleAdaptiveThreshold(boolean theFlag) {
  useAdaptiveThreshold = theFlag;
  // The basic-threshold slider is active exactly when adaptive mode is
  // off, and the two adaptive sliders are active exactly when it is on.
  setLock(cp5.getController("threshold"), useAdaptiveThreshold);
  setLock(cp5.getController("thresholdBlockSize"), !useAdaptiveThreshold);
  setLock(cp5.getController("thresholdConstant"), !useAdaptiveThreshold);
}

// Lock or unlock a ControlP5 widget, dimming its colors while locked
// and restoring the stored defaults when unlocked.
void setLock(Controller theController, boolean theValue) {
  theController.setLock(theValue);
  if (theValue) {
    // Dimmed colors signal that the control is inactive.
    theController.setColorBackground(color(150,150));
    theController.setColorForeground(color(100,100));
    return;
  }
  theController.setColorBackground(color(buttonBgColor));
  theController.setColorForeground(color(buttonColor));
}


BIN
lib/opencv_processing/examples/ImageFilteringWithBlobPersistence/screenshots/blob_persistence.png View File

Before After
Width: 954  |  Height: 616  |  Size: 221KB

+ 38
- 0
lib/opencv_processing/examples/LiveCamTest/LiveCamTest.pde View File

@@ -0,0 +1,38 @@
import gab.opencv.*;
import processing.video.*;
import java.awt.*;

Capture video;
OpenCV opencv;

// Open the camera at half window resolution (frames are scaled x2 in
// draw()) and load the frontal-face Haar cascade.
void setup() {
  size(640, 480);
  video = new Capture(this, 640/2, 480/2);
  opencv = new OpenCV(this, 640/2, 480/2);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  video.start();
}

// Detect faces in the current camera frame and outline each in green.
void draw() {
  // Frames are captured at half size; scale up to fill the window.
  scale(2);
  opencv.loadImage(video);

  image(video, 0, 0 );

  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  Rectangle[] faces = opencv.detect();
  println(faces.length);

  for (Rectangle face : faces) {
    println(face.x + "," + face.y);
    rect(face.x, face.y, face.width, face.height);
  }
}

// Called by the video library whenever a new camera frame is available.
void captureEvent(Capture c) {
  c.read();
}


+ 26161
- 0
lib/opencv_processing/examples/LiveCamTest/data/haarcascade_frontalface_alt.xml
File diff suppressed because it is too large
View File


+ 13
- 0
lib/opencv_processing/examples/LoadAndDisplayImage/LoadAndDisplayImage.pde View File

@@ -0,0 +1,13 @@
import gab.opencv.*;

OpenCV opencv;

// Load "test.jpg" straight into OpenCV and size the window to match.
void setup() {
  opencv = new OpenCV(this, "test.jpg");
  size(opencv.width, opencv.height);
}

// Display OpenCV's current output image each frame.
void draw() {
  image(opencv.getOutput(), 0, 0);
}


BIN
lib/opencv_processing/examples/LoadAndDisplayImage/data/test.jpg View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

BIN
lib/opencv_processing/examples/LoadAndDisplayImage/data/test.png View File

Before After
Width: 1080  |  Height: 720  |  Size: 100KB

+ 43
- 0
lib/opencv_processing/examples/LumaVsGray/LumaVsGray.pde View File

@@ -0,0 +1,43 @@
/*
Luma is a better measure of perceived brightness than
the traditional grayscale created by averaging R, G, and B channels.
This sketch demonstrates converting an image to LAB color space
and accessing the Luma channel for comparison with the more common
grayscale version. Uses the un-wrapped OpenCV cvtColor() function.

*/