Calibrate works?

This commit is contained in:
jan 2016-07-10 00:36:44 -07:00
commit cc711c6343
1104 changed files with 636510 additions and 75 deletions

View file

@ -0,0 +1,140 @@
/**
* ASCII Video
* by Ben Fry.
*
*
* Text characters have been used to represent images since the earliest computers.
* This sketch is a simple homage that re-interprets live video as ASCII text.
* See the keyPressed function for more options, like changing the font size.
*/
import processing.video.*;
// Live video input (captured at 160x120, scaled up when drawn as text).
Capture video;
// When true, the raw video frame is overlaid in the corner for comparison.
boolean cheatScreen;
// All ASCII characters, sorted according to their visual density
String letterOrder =
" .`-_':,;^=+/\"|)\\<>)iv%xclrs{*}I?!][1taeo7zjLu" +
"nT#JCwfy325Fp6mqSghVd4EgXPGZbYkOA&8U$@KHDBWNMR0Q";
// letters[b] is the character used for brightness level b (0..255).
char[] letters;
// Smoothed brightness for each video pixel (damped in draw() to reduce flicker).
float[] bright;
// Current character for each position in the video.
char[] chars;
PFont font;
// Text scale factor; adjusted at runtime with the 'f'/'F' keys.
float fontSize = 1.5;
// Set up the canvas, the camera, the font, and the 256-entry
// brightness-to-letter lookup table used by draw().
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 160, 120);
// Start capturing the images from the camera
video.start();
int count = video.width * video.height;
//println(count);
font = loadFont("UniversLTStd-Light-48.vlw");
// for the 256 levels of brightness, distribute the letters across
// the an array of 256 elements to use for the lookup
letters = new char[256];
for (int i = 0; i < 256; i++) {
int index = int(map(i, 0, 256, 0, letterOrder.length()));
letters[i] = letterOrder.charAt(index);
}
// current characters for each position in the video
chars = new char[count];
// current brightness for each point
bright = new float[count];
for (int i = 0; i < count; i++) {
// set each brightness at the midpoint to start
bright[i] = 128;
}
}
// Called by the video library whenever a new camera frame is ready;
// reading it refreshes video.pixels with the latest image.
void captureEvent(Capture cam) {
  cam.read();
}
// Render the current video frame as a grid of ASCII characters, one per
// video pixel, with per-pixel brightness damping to reduce flicker.
void draw() {
background(0);
pushMatrix();
float hgap = width / float(video.width);
float vgap = height / float(video.height);
// Scale so one character cell covers roughly one video pixel on screen.
scale(max(hgap, vgap) * fontSize);
textFont(font, fontSize);
int index = 0;
video.loadPixels();
// NOTE(review): starting at y = 1 means the loop body runs
// video.height - 1 times, so the last row of video pixels is never
// consumed — confirm this is the intended framing.
for (int y = 1; y < video.height; y++) {
// Move down for next line
translate(0, 1.0 / fontSize);
pushMatrix();
for (int x = 0; x < video.width; x++) {
int pixelColor = video.pixels[index];
// Faster method of calculating r, g, b than red(), green(), blue()
int r = (pixelColor >> 16) & 0xff;
int g = (pixelColor >> 8) & 0xff;
int b = pixelColor & 0xff;
// Another option would be to properly calculate brightness as luminance:
// luminance = 0.3*red + 0.59*green + 0.11*blue
// Or you could instead red + green + blue, and make the the values[] array
// 256*3 elements long instead of just 256.
int pixelBright = max(r, g, b);
// The 0.1 value is used to damp the changes so that letters flicker less
float diff = pixelBright - bright[index];
bright[index] += diff * 0.1;
fill(pixelColor);
int num = int(bright[index]);
text(letters[num], 0, 0);
// Move to the next pixel
index++;
// Move over for next character
translate(1.0 / fontSize, 0);
}
popMatrix();
}
popMatrix();
if (cheatScreen) {
//image(video, 0, height - video.height);
// set() is faster than image() when drawing untransformed images
set(0, height - video.height, video);
}
}
/**
* Handle key presses:
* 'c' toggles the cheat screen that shows the original image in the corner
* 'g' grabs an image and saves the frame to a tiff image
* 'f' and 'F' increase and decrease the font size
*/
// Dispatch single-key commands (see the comment block above for the list).
void keyPressed() {
  if (key == 'g') {
    saveFrame();
  } else if (key == 'c') {
    cheatScreen = !cheatScreen;
  } else if (key == 'f') {
    fontSize *= 1.1;
  } else if (key == 'F') {
    fontSize *= 0.9;
  }
}

View file

@ -0,0 +1,74 @@
/**
* Background Subtraction
* by Golan Levin.
*
* Detect the presence of people and objects in the frame using a simple
* background-subtraction technique. To initialize the background, press a key.
*/
import processing.video.*;
// Number of pixels in one video frame (width * height).
int numPixels;
// Snapshot of the empty scene; refilled by keyPressed().
int[] backgroundPixels;
// Live camera input.
Capture video;
// Open the camera at window size and allocate the background buffer.
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
//video = new Capture(this, 160, 120);
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
numPixels = video.width * video.height;
// Create array to store the background image
backgroundPixels = new int[numPixels];
// Make the pixels[] array available for direct manipulation
loadPixels();
}
// Show the per-channel absolute difference between the live frame and the
// stored background, and print a scalar "presence" total to the console.
void draw() {
if (video.available()) {
video.read(); // Read a new video frame
video.loadPixels(); // Make the pixels of video available
// Difference between the current frame and the stored background
int presenceSum = 0;
for (int i = 0; i < numPixels; i++) { // For each pixel in the video frame...
// Fetch the current color in that location, and also the color
// of the background in that spot
color currColor = video.pixels[i];
color bkgdColor = backgroundPixels[i];
// Extract the red, green, and blue components of the current pixel's color
int currR = (currColor >> 16) & 0xFF;
int currG = (currColor >> 8) & 0xFF;
int currB = currColor & 0xFF;
// Extract the red, green, and blue components of the background pixel's color
int bkgdR = (bkgdColor >> 16) & 0xFF;
int bkgdG = (bkgdColor >> 8) & 0xFF;
int bkgdB = bkgdColor & 0xFF;
// Compute the difference of the red, green, and blue values
int diffR = abs(currR - bkgdR);
int diffG = abs(currG - bkgdG);
int diffB = abs(currB - bkgdB);
// Add these differences to the running tally
presenceSum += diffR + diffG + diffB;
// Render the difference image to the screen
pixels[i] = color(diffR, diffG, diffB);
// The following line does the same thing much faster, but is more technical
//pixels[i] = 0xFF000000 | (diffR << 16) | (diffG << 8) | diffB;
}
updatePixels(); // Notify that the pixels[] array has changed
println(presenceSum); // Print out the total amount of movement
}
}
// When a key is pressed, capture the background image into the backgroundPixels
// buffer, by copying each of the current frame's pixels into it.
void keyPressed() {
video.loadPixels();
// Snapshot the current frame as the new background reference.
arraycopy(video.pixels, backgroundPixels);
}

View file

@ -0,0 +1,63 @@
/**
* Brightness Thresholding
* by Golan Levin.
*
* Determines whether a test location (such as the cursor) is contained within
* the silhouette of a dark object.
*/
import processing.video.*;
// Cached colors for the two-tone thresholded output.
color black = color(0);
color white = color(255);
// Number of pixels in one video frame.
int numPixels;
Capture video;
// Open the camera at window size and configure drawing for the cursor dot.
void setup() {
size(640, 480); // Change size to 320 x 240 if too slow at 640 x 480
strokeWeight(5);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
numPixels = video.width * video.height;
noCursor();
smooth();
}
// Binarize the video by brightness, then draw a dot at the cursor whose
// fill contrasts with the thresholded pixel underneath it.
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
int threshold = 127; // Set the threshold value
float pixelBrightness; // Declare variable to store a pixel's color
// Turn each pixel in the video frame black or white depending on its brightness
loadPixels();
for (int i = 0; i < numPixels; i++) {
pixelBrightness = brightness(video.pixels[i]);
if (pixelBrightness > threshold) { // If the pixel is brighter than the
pixels[i] = white; // threshold value, make it white
}
else { // Otherwise,
pixels[i] = black; // make it black
}
}
updatePixels();
// Test a location to see where it is contained. Fetch the pixel at the test
// location (the cursor), and compute its brightness
// (the same threshold is reused for the cursor test below)
int testValue = get(mouseX, mouseY);
float testBrightness = brightness(testValue);
if (testBrightness > threshold) { // If the test location is brighter than
fill(black); // the threshold set the fill to black
}
else { // Otherwise,
fill(white); // set the fill to white
}
ellipse(mouseX, mouseY, 20, 20);
}
}

View file

@ -0,0 +1,53 @@
/**
* Brightness Tracking
* by Golan Levin.
*
* Tracks the brightest pixel in a live video signal.
*/
import processing.video.*;
// Live camera input.
Capture video;
// Open the camera at window size; the tracking circle is drawn unstroked.
void setup() {
size(640, 480);
// Uses the default video input, see the reference if this causes an error
video = new Capture(this, width, height);
video.start();
noStroke();
smooth();
}
// Draw the live frame, scan every pixel for the maximum brightness, and
// mark that location with a translucent yellow circle.
void draw() {
if (video.available()) {
video.read();
image(video, 0, 0, width, height); // Draw the webcam video onto the screen
int brightestX = 0; // X-coordinate of the brightest video pixel
int brightestY = 0; // Y-coordinate of the brightest video pixel
float brightestValue = 0; // Brightness of the brightest video pixel
// Search for the brightest pixel: For each row of pixels in the video image and
// for each pixel in the yth row, compute each pixel's index in the video
video.loadPixels();
int index = 0;
for (int y = 0; y < video.height; y++) {
for (int x = 0; x < video.width; x++) {
// Get the color stored in the pixel
int pixelValue = video.pixels[index];
// Determine the brightness of the pixel
float pixelBrightness = brightness(pixelValue);
// If that value is brighter than any previous, then store the
// brightness of that pixel, as well as its (x,y) location
if (pixelBrightness > brightestValue) {
brightestValue = pixelBrightness;
brightestY = y;
brightestX = x;
}
index++;
}
}
// Draw a large, yellow circle at the brightest pixel
fill(255, 204, 0, 128);
ellipse(brightestX, brightestY, 200, 200);
}
}

View file

@ -0,0 +1,146 @@
/**
* Color Sorting
* by Ben Fry.
*
* Example that sorts all colors from the incoming video
* and arranges them into vertical bars.
*/
import processing.video.*;
Capture video;
// When true, overlay the raw video in the corner.
boolean cheatScreen;
// Colors sampled from the current frame (sorted each frame by brightness key).
Tuple[] captureColors;
// Displayed colors, eased toward captureColors for smooth transitions.
Tuple[] drawColors;
// Squared RGB magnitude for each sample; used as the sort key.
int[] bright;
// How many pixels to skip in either direction
int increment = 5;
// Open the camera and allocate the parallel sample/display color arrays,
// one entry per (increment x increment) sampling cell.
void setup() {
size(800, 600);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 160, 120);
// Start capturing the images from the camera
video.start();
int count = (video.width * video.height) / (increment * increment);
bright = new int[count];
captureColors = new Tuple[count];
drawColors = new Tuple[count];
for (int i = 0; i < count; i++) {
captureColors[i] = new Tuple();
// Start display colors at mid-gray so the first frames fade in.
drawColors[i] = new Tuple(0.5, 0.5, 0.5);
}
}
// Sample the frame on a sparse grid, sort the samples by color magnitude,
// and draw them as full-height vertical bars in a single QUAD_STRIP.
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
background(0);
noStroke();
int index = 0;
for (int j = 0; j < video.height; j += increment) {
for (int i = 0; i < video.width; i += increment) {
int pixelColor = video.pixels[j*video.width + i];
int r = (pixelColor >> 16) & 0xff;
int g = (pixelColor >> 8) & 0xff;
int b = pixelColor & 0xff;
// Technically would be sqrt of the following, but no need to do
// sqrt before comparing the elements since we're only ordering
bright[index] = r*r + g*g + b*b;
captureColors[index].set(r, g, b);
index++;
}
}
sort(index, bright, captureColors);
beginShape(QUAD_STRIP);
for (int i = 0; i < index; i++) {
// Ease the displayed color toward its freshly sorted target,
// then use it as the fill for this bar.
drawColors[i].target(captureColors[i], 0.1);
drawColors[i].phil();
float x = map(i, 0, index, 0, width);
vertex(x, 0);
vertex(x, height);
}
endShape();
if (cheatScreen) {
//image(video, 0, height - video.height);
// Faster method of displaying pixels array on screen
set(0, height - video.height, video);
}
}
}
// Keyboard controls: 'g' saves the current frame to disk,
// 'c' toggles the small live-video overlay.
void keyPressed() {
  switch (key) {
    case 'g':
      saveFrame();
      break;
    case 'c':
      cheatScreen = !cheatScreen;
      break;
  }
}
// Functions to handle sorting the color data
// Sort the first 'count' entries of keys[] ascending, carrying the
// parallel values[] array along so pairs stay matched.
void sort(int count, int[] keys, Tuple[] values) {
  sortSub(keys, values, 0, count - 1);
}
// Exchange elements i and j in both parallel arrays.
void sortSwap(int[] keys, Tuple[] values, int i, int j) {
  int tmpKey = keys[i];
  keys[i] = keys[j];
  keys[j] = tmpKey;
  Tuple tmpValue = values[i];
  values[i] = values[j];
  values[j] = tmpValue;
}
// Recursive quicksort over a[lo0..hi0] using the value at the middle
// index as pivot, mirroring every swap into the parallel stuff[] array.
void sortSub(int[] a, Tuple[] stuff, int lo0, int hi0) {
int lo = lo0;
int hi = hi0;
int mid;
if (hi0 > lo0) {
// Pivot value (indices here are small, so (lo0 + hi0) / 2 cannot overflow).
mid = a[(lo0 + hi0) / 2];
while (lo <= hi) {
// Scan right past elements already below the pivot...
while ((lo < hi0) && (a[lo] < mid)) {
++lo;
}
// ...and left past elements already above it.
while ((hi > lo0) && (a[hi] > mid)) {
--hi;
}
// Out-of-place pair found: swap and advance both cursors.
if (lo <= hi) {
sortSwap(a, stuff, lo, hi);
++lo;
--hi;
}
}
// Recurse into whichever partitions still hold more than one element.
if (lo0 < hi)
sortSub(a, stuff, lo0, hi);
if (lo < hi0)
sortSub(a, stuff, lo, hi0);
}
}

View file

@ -0,0 +1,29 @@
// Simple vector class that holds an x,y,z position.
class Tuple {
float x, y, z;
Tuple() { }
Tuple(float x, float y, float z) {
set(x, y, z);
}
void set(float x, float y, float z) {
this.x = x;
this.y = y;
this.z = z;
}
void target(Tuple another, float amount) {
float amount1 = 1.0 - amount;
x = x*amount1 + another.x*amount;
y = y*amount1 + another.y*amount;
z = z*amount1 + another.z*amount;
}
void phil() {
fill(x, y, z);
}
}

View file

@ -0,0 +1,70 @@
/**
* Frame Differencing
* by Golan Levin.
*
* Quantify the amount of movement in the video frame using frame-differencing.
*/
import processing.video.*;
// Number of pixels in one video frame.
int numPixels;
// Pixels of the previous frame, for differencing against the current one.
int[] previousFrame;
Capture video;
// Open the camera at window size and allocate the previous-frame buffer.
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
numPixels = video.width * video.height;
// Create an array to store the previously captured frame
previousFrame = new int[numPixels];
loadPixels();
}
// Display the per-channel difference between the current and previous
// frames, print the summed movement, and roll the current frame into
// the previous-frame buffer.
void draw() {
if (video.available()) {
// When using video to manipulate the screen, use video.available() and
// video.read() inside the draw() method so that it's safe to draw to the screen
video.read(); // Read the new frame from the camera
video.loadPixels(); // Make its pixels[] array available
int movementSum = 0; // Amount of movement in the frame
for (int i = 0; i < numPixels; i++) { // For each pixel in the video frame...
color currColor = video.pixels[i];
color prevColor = previousFrame[i];
// Extract the red, green, and blue components from current pixel
int currR = (currColor >> 16) & 0xFF; // Like red(), but faster
int currG = (currColor >> 8) & 0xFF;
int currB = currColor & 0xFF;
// Extract red, green, and blue components from previous pixel
int prevR = (prevColor >> 16) & 0xFF;
int prevG = (prevColor >> 8) & 0xFF;
int prevB = prevColor & 0xFF;
// Compute the difference of the red, green, and blue values
int diffR = abs(currR - prevR);
int diffG = abs(currG - prevG);
int diffB = abs(currB - prevB);
// Add these differences to the running tally
movementSum += diffR + diffG + diffB;
// Render the difference image to the screen
pixels[i] = color(diffR, diffG, diffB);
// The following line is much faster, but more confusing to read
//pixels[i] = 0xff000000 | (diffR << 16) | (diffG << 8) | diffB;
// Save the current color into the 'previous' buffer
previousFrame[i] = currColor;
}
// To prevent flicker from frames that are all black (no movement),
// only update the screen if the image has changed.
if (movementSum > 0) {
updatePixels();
println(movementSum); // Print the total amount of movement to the console
}
}
}

View file

@ -0,0 +1,62 @@
/**
* Framingham
* by Ben Fry.
*
* Show subsequent frames from video input as a grid. Also fun with movie files.
*/
import processing.video.*;
Capture video;
// Grid column where the next captured frame will be drawn.
int column;
// Number of frame-sized columns that fit across the window.
int columnCount;
// Index of the bottom grid row, where new frames appear.
int lastRow;
// Buffer used to move all the pixels up
int[] scoot;
// Open a small camera and size the scroll buffer to hold every grid row
// except the bottom one (which is redrawn each time a row fills up).
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 160, 120);
// Start capturing the images from the camera
video.start();
column = 0;
columnCount = width / video.width;
int rowCount = height / video.height;
lastRow = rowCount - 1;
scoot = new int[lastRow*video.height * width];
background(0);
}
// Draw each new frame into the next cell of the bottom row; when the row
// is full, scroll the whole screen up one video-row and start over.
void draw() {
// By using video.available, only the frame rate need be set inside setup()
if (video.available()) {
video.read();
video.loadPixels();
image(video, video.width*column, video.height*lastRow);
column++;
if (column == columnCount) {
loadPixels();
// Scoot everybody up one row
arrayCopy(pixels, video.height*width, scoot, 0, scoot.length);
arrayCopy(scoot, 0, pixels, 0, scoot.length);
// Set the moved row to black
for (int i = scoot.length; i < width*height; i++) {
pixels[i] = #000000;
}
column = 0;
updatePixels();
}
}
}

View file

@ -0,0 +1,46 @@
/**
* Getting Started with Capture.
*
* Reading and displaying an image from an attached Capture device.
*/
import processing.video.*;
// The capture device; initialized in setup().
Capture cam;
// Initialize the sketch window and open a capture device.
//
// Bug fix: the original wrote "} if (cameras.length == 0)" after the null
// check, so when Capture.list() returned null the code assigned the default
// camera and then immediately crashed with a NullPointerException on
// cameras.length. The branches are now a proper else-if chain. The
// default-camera fallback also calls cam.start(), which the original
// forgot, leaving the fallback camera never capturing.
void setup() {
  size(640, 480);
  String[] cameras = Capture.list();
  if (cameras == null) {
    println("Failed to retrieve the list of available cameras, will try the default...");
    cam = new Capture(this, 640, 480);
    cam.start();
  } else if (cameras.length == 0) {
    println("There are no cameras available for capture.");
    exit();
  } else {
    println("Available cameras:");
    printArray(cameras);
    // The camera can be initialized directly using an element
    // from the array returned by list():
    cam = new Capture(this, cameras[0]);
    // Or, the settings can be defined based on the text in the list
    //cam = new Capture(this, 640, 480, "Built-in iSight", 30);
    // Start capturing the images from the camera
    cam.start();
  }
}
// Read the newest frame when one is ready and draw it scaled to the window.
void draw() {
if (cam.available() == true) {
cam.read();
}
image(cam, 0, 0, width, height);
// The following does the same as the above image() line, but
// is faster when just drawing the image without any additional
// resizing, transformations, or tint.
//set(0, 0, cam);
}

View file

@ -0,0 +1,213 @@
/**
* HSV Space
* by Ben Fry.
*
* Arrange the pixels from live video into the HSV Color Cone.
*/
import processing.video.*;
import java.awt.Color;
Capture video;
// Number of pixels in one video frame.
int count;
boolean cheatScreen = true;
// Geometry and interaction tuning constants.
static final float BOX_SIZE = 0.75;
static final float CONE_HEIGHT = 1.2;
static final float MAX_RADIUS = 10;
static final float ROT_INCREMENT = 3.0;
static final float TRANS_INCREMENT = 1;
// Easing fraction used when moving points/colors toward their targets.
static final float STEP_AMOUNT = 0.1;
// Per-pixel display color (eased RGB).
Tuple[] farbe;
// Per-pixel position inside the HSV cone (eased).
Tuple[] trans;
// Scratch buffer for Color.RGBtoHSB results (hue, saturation, brightness).
float[] hsb = new float[3];
// Camera orientation and translation, driven by mouse and keys.
float leftRightAngle;
float upDownAngle;
float fwdBackTrans;
float upDownTrans;
float leftRightTrans;
// When true the view slowly auto-rotates each frame.
boolean motion;
// Draw spheres instead of boxes when true.
boolean blobby = false;
// Open a small camera, set the initial 3D view, and allocate one color
// tuple and one position tuple per video pixel.
void setup() {
size(640, 480, P3D);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 160, 120);
// Start capturing the images from the camera
video.start();
count = video.width * video.height;
sphereDetail(60);
upDownTrans = 0;
leftRightTrans = 0;
motion = false;
// Hand-tuned starting view angles/zoom for the cone.
leftRightAngle = 101.501297;
upDownAngle = -180.098694;
fwdBackTrans = 14.800003;
farbe = new Tuple[count];
trans = new Tuple[count];
for (int i = 0; i < count; i++) {
farbe[i] = new Tuple();
trans[i] = new Tuple();
}
}
// Map every video pixel into the HSV cone (radius = saturation*value,
// angle = hue, height = value), easing positions and colors toward the
// current frame, and draw each as a small box or sphere.
void draw() {
background(0);
if (!blobby) {
lights();
}
pushMatrix();
translate(width/2, height/2);
scale(min(width, height) / 10.0);
translate(0, 0, -20 + fwdBackTrans);
rotateY(radians(36 + leftRightAngle)); //, 0, 1, 0);
rotateX(radians(-228 + upDownAngle)); //, 1, 0, 0);
strokeWeight(0.1);
// Reference wireframe cone; coarser when drawing blobby spheres.
if (blobby) {
stroke(0.35, 0.35, 0.25, 0.15);
wireCone(MAX_RADIUS, MAX_RADIUS * CONE_HEIGHT, 18, 18);
}
else {
stroke(0.35, 0.35, 0.25, 0.25);
wireCone(MAX_RADIUS, MAX_RADIUS * CONE_HEIGHT, 180, 18);
}
noStroke();
video.loadPixels();
for (int i = 0; i < count; i++) {
int pixelColor = video.pixels[i];
int r = (pixelColor >> 16) & 0xff;
int g = (pixelColor >> 8) & 0xff;
int b = pixelColor & 0xff;
Color.RGBtoHSB(r, g, b, hsb);
float radius = hsb[1] * hsb[2];
float angle = hsb[0] * 360.0 * DEG_TO_RAD;
float nx = MAX_RADIUS * radius * cos(angle);
float ny = MAX_RADIUS * radius * sin(angle);
float nz = hsb[2] * MAX_RADIUS * CONE_HEIGHT;
// Ease position and color toward the new targets by STEP_AMOUNT.
trans[i].set(trans[i].x - (trans[i].x - nx)*STEP_AMOUNT,
trans[i].y - (trans[i].y - ny)*STEP_AMOUNT,
trans[i].z - (trans[i].z - nz)*STEP_AMOUNT);
farbe[i].set(farbe[i].x - (farbe[i].x - r)*STEP_AMOUNT,
farbe[i].y - (farbe[i].y - g)*STEP_AMOUNT,
farbe[i].z - (farbe[i].z - b)*STEP_AMOUNT);
pushMatrix();
farbe[i].phil();
trans[i].tran();
rotate(radians(45), 1, 1, 0);
if (blobby) {
sphere(BOX_SIZE * 2); //, 20, 20);
} else {
box(BOX_SIZE);
}
popMatrix();
}
popMatrix();
if (motion) {
upDownAngle--;
leftRightAngle--;
}
if (cheatScreen) {
image(video, 0, height - video.height);
}
}
// Called whenever the camera has a new frame; read it into video.pixels.
void captureEvent(Capture cam) {
  cam.read();
}
// Keyboard controls: 'g' saves a frame, 'c' toggles the video overlay,
// 'm' toggles auto-rotation, '='/'-' zoom in/out, 'b' toggles spheres.
void keyPressed() {
  if (key == 'g') {
    saveFrame();
  } else if (key == 'c') {
    cheatScreen = !cheatScreen;
  } else if (key == 'm') {
    motion = !motion;
  } else if (key == '=') {
    fwdBackTrans += TRANS_INCREMENT;
  } else if (key == '-') {
    fwdBackTrans -= TRANS_INCREMENT;
  } else if (key == 'b') {
    blobby = !blobby;
  }
}
// Mouse controls for the 3D view: left drag rotates, middle drag pans,
// right drag zooms in and out.
void mouseDragged() {
float dX, dY;
switch (mouseButton) {
case LEFT: // left right up down
dX = pmouseX - mouseX;
dY = pmouseY - mouseY;
leftRightAngle -= dX * 0.2;
upDownAngle += dY * 0.4;
break;
case CENTER:
dX = pmouseX - mouseX;
dY = pmouseY - mouseY;
leftRightTrans -= TRANS_INCREMENT * dX;
upDownTrans -= TRANS_INCREMENT * dY;
break;
case RIGHT: // in and out
dY = (float) (pmouseY - mouseY);
fwdBackTrans -= TRANS_INCREMENT * dY;
break;
}
}
// Draw a simple wireframe cone: spokes from the rim to the apex, plus a
// rim ellipse at depth 'height'. The 'height' parameter shadows the
// sketch-level height variable inside this function.
// NOTE(review): stepX and stepY are accepted but never used — the spoke
// count is the hard-coded 'steps' below; confirm whether they were meant
// to control the density.
void wireCone(float radius, float height, int stepX, int stepY) {
int steps = 10;
stroke(40);
for (int i = 0; i < steps; i++) {
float angle = map(i, 0, steps, 0, TWO_PI);
float x = radius * cos(angle);
float y = radius * sin(angle);
line(x, y, height, 0, 0, 0);
}
noFill();
pushMatrix();
translate(0, 0, height);
ellipseMode(CENTER);
ellipse(0, 0, radius, radius);
popMatrix();
}

View file

@ -0,0 +1,33 @@
// Simple vector class that holds an x,y,z position.
class Tuple {
float x, y, z;
Tuple() { }
Tuple(float x, float y, float z) {
set(x, y, z);
}
void set(float x, float y, float z) {
this.x = x;
this.y = y;
this.z = z;
}
void target(Tuple another, float amount) {
float amount1 = 1.0 - amount;
x = x*amount1 + another.x*amount;
y = y*amount1 + another.y*amount;
z = z*amount1 + another.z*amount;
}
void phil() {
fill(x, y, z);
}
void tran() {
translate(x, y, z);
}
}

View file

@ -0,0 +1,57 @@
/**
* Live Pocky
* by Ben Fry.
*
* Unwrap each frame of live video into a single line of pixels.
*/
import processing.video.*;
Capture video;
// NOTE(review): 'count' is declared but never assigned or read in this sketch.
int count;
// Row in buffer[] where the next scanline will be written.
int writeRow;
// Total rows kept in the ring buffer (two screens' worth).
int maxRows;
// Ring-buffer row currently shown at the top of the screen.
int topRow;
// Ring buffer of display-width scanlines.
int buffer[];
// Open the camera and allocate a scrolling ring buffer two screens tall.
void setup() {
size(600, 400);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 320, 240);
// Start capturing the images from the camera
video.start();
maxRows = height * 2;
buffer = new int[width * maxRows];
writeRow = height - 1;
topRow = 0;
background(0);
loadPixels();
}
// Append one scanline per frame to the ring buffer, then repaint the
// window from the buffer so old lines scroll upward.
void draw() {
video.loadPixels();
// Copy the first 'width' pixels of the frame (its top-left run) into
// the current write row — this is the "single line" per frame.
arraycopy(video.pixels, 0, buffer, writeRow * width, width);
writeRow++;
if (writeRow == maxRows) {
writeRow = 0;
}
topRow++;
// Repaint the screen from the ring buffer, wrapping with modulo;
// writes straight into the sketch's pixel buffer via g.pixels.
for (int y = 0; y < height; y++) {
int row = (topRow + y) % maxRows;
arraycopy(buffer, row * width, g.pixels, y*width, width);
}
updatePixels();
}
// Called whenever the camera has a new frame; read it into video.pixels.
void captureEvent(Capture cam) {
  cam.read();
}

View file

@ -0,0 +1,73 @@
/**
* Mirror
* by Daniel Shiffman.
*
* Each pixel from the video source is drawn as a rectangle with rotation based on brightness.
*/
import processing.video.*;
// Size of each cell in the grid, in pixels
int cellSize = 20;
// Number of columns and rows in our system
int cols, rows;
// Variable for capture device
Capture video;
// Open the camera at window size and derive the cell grid dimensions.
void setup() {
size(640, 480);
frameRate(30);
cols = width / cellSize;
rows = height / cellSize;
// Alpha range 0..100 for the translucent rectangles below.
colorMode(RGB, 255, 255, 255, 100);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
background(0);
}
// Draw one rotated, translucent rectangle per grid cell, colored from the
// horizontally mirrored video pixel and rotated by its brightness.
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
// Begin loop for columns
for (int i = 0; i < cols; i++) {
// Begin loop for rows
for (int j = 0; j < rows; j++) {
// Where are we, pixel-wise?
int x = i*cellSize;
int y = j*cellSize;
int loc = (video.width - x - 1) + y*video.width; // Reversing x to mirror the image
float r = red(video.pixels[loc]);
float g = green(video.pixels[loc]);
float b = blue(video.pixels[loc]);
// Make a new color with an alpha component
color c = color(r, g, b, 75);
// Code for drawing a single rect
// Using translate in order for rotation to work properly
pushMatrix();
translate(x+cellSize/2, y+cellSize/2);
// Rotation formula based on brightness
rotate((2 * PI * brightness(c) / 255.0));
rectMode(CENTER);
fill(c);
noStroke();
// Rects are larger than the cell for some overlap
rect(0, 0, cellSize+6, cellSize+6);
popMatrix();
}
}
}
}

View file

@ -0,0 +1,63 @@
/**
* Mirror 2
* by Daniel Shiffman.
*
* Each pixel from the video source is drawn as a rectangle with size based on brightness.
*/
import processing.video.*;
// Size of each cell in the grid, in pixels
int cellSize = 15;
// Number of columns and rows in our system
int cols, rows;
// Variable for capture device
Capture video;
// Open the camera at window size and derive the cell grid dimensions.
void setup() {
size(640, 480);
// Set up columns and rows
cols = width / cellSize;
rows = height / cellSize;
// Alpha range 0..100 (matches the Mirror sketch's convention).
colorMode(RGB, 255, 255, 255, 100);
rectMode(CENTER);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
background(0);
}
// Draw one white square per grid cell, sized by the brightness of the
// horizontally mirrored video pixel, over a blue background.
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
background(0, 0, 255);
// Begin loop for columns
for (int i = 0; i < cols;i++) {
// Begin loop for rows
for (int j = 0; j < rows;j++) {
// Where are we, pixel-wise?
int x = i * cellSize;
int y = j * cellSize;
int loc = (video.width - x - 1) + y*video.width; // Reversing x to mirror the image
// Each rect is colored white with a size determined by brightness
color c = video.pixels[loc];
float sz = (brightness(c) / 255.0) * cellSize;
fill(255);
noStroke();
rect(x + cellSize/2, y + cellSize/2, sz, sz);
}
}
}
}

View file

@ -0,0 +1,81 @@
/**
* Radial Pocky
* by Ben Fry.
*
* Unwrap each frame of live video into a single line of pixels along a circle
*/
import processing.video.*;
Capture video;
// Number of pixels in one video frame.
int videoCount;
// Which angular wedge gets refreshed this frame.
int currentAngle;
// Number of pixels in the output window.
int pixelCount;
int angleCount = 200; // how many divisions
// Precomputed distance-from-center for every screen pixel.
int radii[];
// Precomputed angular bin for every screen pixel (-1 = excluded).
int angles[];
// Precompute, for every screen pixel, its distance from the window
// center and which of the angleCount wedges it falls in; draw() later
// refreshes one wedge per frame from the video.
void setup() {
// size must be set to video.width*video.height*2 in both directions
size(600, 600);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, 160, 120);
// Start capturing the images from the camera
video.start();
videoCount = video.width * video.height;
pixelCount = width*height;
int centerX = width / 2;
int centerY = height / 2;
radii = new int[pixelCount];
angles = new int[pixelCount];
int offset = 0;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
int dx = centerX - x;
int dy = centerY - y;
float angle = atan2(dy, dx);
if (angle < 0) angle += TWO_PI;
angles[offset] = (int) (angleCount * (angle / TWO_PI));
int radius = (int) mag(dx, dy);
// NOTE(review): this guard compares a radius (at most a few hundred
// for a 600x600 window) against videoCount (the full pixel count of
// the video frame), so it effectively never triggers — possibly a
// smaller bound such as videoCount per row was intended; confirm.
if (radius >= videoCount) {
radius = -1;
angles[offset] = -1;
}
radii[offset] = radius;
offset++;
}
}
background(0);
}
// Each frame, repaint only the pixels belonging to the current angular
// wedge, sourcing each from video.pixels indexed by that pixel's radius,
// then advance to the next wedge (wrapping after angleCount).
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
loadPixels();
for (int i = 0; i < pixelCount; i++) {
if (angles[i] == currentAngle) {
pixels[i] = video.pixels[radii[i]];
}
}
updatePixels();
currentAngle++;
if (currentAngle == angleCount) {
currentAngle = 0;
}
}
}

View file

@ -0,0 +1,56 @@
/**
* Simple Real-Time Slit-Scan Program.
* By Golan Levin.
*
* This demonstration depends on the canvas height being equal
* to the video capture height. If you would prefer otherwise,
* consider using the image copy() function rather than the
* direct pixel-accessing approach I have used here.
*/
import processing.video.*;
Capture video;
// Column of the video frame to sample (fixed at the middle).
int videoSliceX;
// Screen x position where the next slice lands (sweeps right to left).
int drawPositionX;
// Open the camera (height must match the window height — see header
// comment) and start the sweep at the right edge.
void setup() {
size(600, 240);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this,320, 240);
// Start capturing the images from the camera
video.start();
videoSliceX = video.width / 2;
drawPositionX = width - 1;
background(0);
}
// Copy the middle column of each new frame to the current sweep position,
// moving one pixel left per frame and wrapping at the edge.
void draw() {
if (video.available()) {
video.read();
video.loadPixels();
// Copy a column of pixels from the middle of the video
// To a location moving slowly across the canvas.
loadPixels();
for (int y = 0; y < video.height; y++){
int setPixelIndex = y*width + drawPositionX;
int getPixelIndex = y*video.width + videoSliceX;
pixels[setPixelIndex] = video.pixels[getPixelIndex];
}
updatePixels();
drawPositionX--;
// Wrap the position back to the beginning if necessary.
if (drawPositionX < 0) {
drawPositionX = width - 1;
}
}
}

View file

@ -0,0 +1,131 @@
/**
* Spatiotemporal
* by David Muth
*
* Records a number of video frames into memory, then plays back the video
* buffer by turning the time axis into the x-axis and vice versa
*/
import processing.video.*;
Capture video;
// NOTE(review): 'signal' is declared but never used in this sketch.
int signal = 0;
//the buffer for storing video frames
ArrayList frames;
//different program modes for recording and playback
int mode = 0;
int MODE_NEWBUFFER = 0;
int MODE_RECORDING = 1;
int MODE_PLAYBACK = 2;
// Column used during playback; advances once per draw() call.
int currentX = 0;
// Open the camera at window size; mode starts at MODE_NEWBUFFER so the
// first captureEvent() allocates the frame buffer.
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
}
// Read each new frame and, while recording, append a copy to the buffer
// until one frame per screen column (width frames) has been stored.
void captureEvent(Capture c) {
c.read();
//create a new buffer in case one is needed
if (mode == MODE_NEWBUFFER) {
frames = new ArrayList();
mode = MODE_RECORDING;
}
//record into the buffer until there are enough frames
if (mode == MODE_RECORDING) {
//copy the current video frame into an image, so it can be stored in the buffer
PImage img = createImage(width, height, RGB);
video.loadPixels();
arrayCopy(video.pixels, img.pixels);
frames.add(img);
//in case enough frames have been recorded, switch to playback mode
if (frames.size() >= width) {
mode = MODE_PLAYBACK;
}
}
}
// While recording, show one column per buffered frame as a progress view;
// during playback, swap the time and x axes: screen column x shows column
// currentX of buffered frame x, then currentX advances each call.
void draw() {
loadPixels();
//code for the recording mode
if (mode == MODE_RECORDING) {
//set the image counter to 0
int currentImage = 0;
//begin a loop for displaying pixel columns
for (int x = 0; x < video.width; x++) {
//go through the frame buffer and pick an image using the image counter
if (currentImage < frames.size()) {
PImage img = (PImage)frames.get(currentImage);
//display a pixel column of the current image
if (img != null) {
img.loadPixels();
for (int y = 0; y < video.height; y++) {
pixels[x + y * width] = img.pixels[x + y * video.width];
}
}
//increase the image counter
currentImage++;
}
else {
break;
}
}
}
//code for displaying the spatiotemporal transformation
if (mode == MODE_PLAYBACK) {
//begin a loop for displaying pixel columns
for (int x = 0; x < video.width; x++) {
//get an image from the buffer using loopcounter x as the index
PImage img = (PImage)frames.get(x);
if (img != null) {
img.loadPixels();
//pick the same column from each image for display,
//then distribute the columns over the x-axis on the screen
for(int y = 0; y < video.height; y++) {
pixels[x + y * width] = img.pixels[currentX + y * video.width];
}
}
}
//a different column shall be used next time draw() is being called
currentX++;
//if the end of the buffer is reached
if(currentX >= video.width) {
//create a new buffer when the next video frame arrives
mode = MODE_NEWBUFFER;
//reset the column counter
currentX = 0;
}
}
updatePixels();
}

View file

@ -0,0 +1,84 @@
/**
* Time Displacement
* by David Muth
*
* Keeps a buffer of video frames in memory and displays pixel rows
* taken from consecutive frames distributed over the y-axis
*/
import processing.video.*;
Capture video;
// NOTE(review): 'signal' is declared but never used in this sketch.
int signal = 0;
//the buffer for storing video frames (oldest first, capped at height/4)
ArrayList frames = new ArrayList();
// Open the camera at window size; frames accumulate via captureEvent().
void setup() {
size(640, 480);
// This the default video input, see the GettingStartedCapture
// example if it creates an error
video = new Capture(this, width, height);
// Start capturing the images from the camera
video.start();
}
// Buffer each new frame; keep only the most recent height/4 frames,
// one per 4-pixel band drawn by draw().
void captureEvent(Capture camera) {
camera.read();
// Copy the current video frame into an image, so it can be stored in the buffer
PImage img = createImage(width, height, RGB);
video.loadPixels();
arrayCopy(video.pixels, img.pixels);
frames.add(img);
// Once there are enough frames, remove the oldest one when adding a new one
if (frames.size() > height/4) {
frames.remove(0);
}
}
// Compose the output from 4-pixel-high horizontal bands, each band taken
// from a progressively newer buffered frame (oldest at the top).
void draw() {
// Set the image counter to 0
int currentImage = 0;
loadPixels();
// Begin a loop for displaying pixel rows of 4 pixels height
for (int y = 0; y < video.height; y+=4) {
// Go through the frame buffer and pick an image, starting with the oldest one
if (currentImage < frames.size()) {
PImage img = (PImage)frames.get(currentImage);
if (img != null) {
img.loadPixels();
// Put 4 rows of pixels on the screen
for (int x = 0; x < video.width; x++) {
pixels[x + y * width] = img.pixels[x + y * video.width];
pixels[x + (y + 1) * width] = img.pixels[x + (y + 1) * video.width];
pixels[x + (y + 2) * width] = img.pixels[x + (y + 2) * video.width];
pixels[x + (y + 3) * width] = img.pixels[x + (y + 3) * video.width];
}
}
// Increase the image counter
currentImage++;
} else {
break;
}
}
updatePixels();
// For recording an image sequence
//saveFrame("frame-####.jpg");
}

View file

@ -0,0 +1,77 @@
/**
* Frames
* by Andres Colubri.
*
 * Moves through the video one frame at a time using the
 * arrow keys. It estimates frame counts using the frame rate
 * of the movie file, so it might not be exact in some cases.
*/
import processing.video.*;
Movie mov;
// Frame index requested via the arrow keys; applied by setFrame().
int newFrame = 0;
// Opens the window, loads the movie, and leaves it paused on frame 0.
void setup() {
  size(640, 360);
  background(0);

  // Load and set the video to play. Setting the video
  // in play mode is needed so at least one frame is read
  // and we can get duration, size and other information from
  // the video stream.
  mov = new Movie(this, "transit.mov");

  // Pausing the video at the first frame.
  mov.play();
  mov.jump(0);
  mov.pause();
}
// Library callback: a new frame of the movie is ready — read it so it
// becomes the image displayed by draw().
void movieEvent(Movie m) {
  m.read();
}
// Draws the current movie frame scaled to the window and overlays a
// "current / last" frame counter in the top-left corner.
void draw() {
  background(0);
  image(mov, 0, 0, width, height);

  String counter = getFrame() + " / " + (getLength() - 1);
  fill(255);
  text(counter, 10, 30);
}
// Steps one frame back/forward with the LEFT/RIGHT arrow keys, clamped
// to [0, getLength() - 1].
// Fix: only seek when an arrow key actually changed the frame index —
// the original called setFrame() on every key press (including ordinary
// character keys), forcing a redundant play/jump/pause cycle.
void keyPressed() {
  if (key != CODED) {
    return;
  }
  if (keyCode == LEFT && newFrame > 0) {
    newFrame--;
  } else if (keyCode == RIGHT && newFrame < getLength() - 1) {
    newFrame++;
  } else {
    return;
  }
  setFrame(newFrame);
}
// Returns the (estimated) index of the frame currently shown, derived
// from the playback position.
// Fix: use the movie's actual frame rate instead of the hard-coded
// 30 fps the original assumed, keeping it consistent with setFrame()
// and getLength(), which both use mov.frameRate.
int getFrame() {
  return ceil(mov.time() * mov.frameRate) - 1;
}
// Jumps the (paused) movie to frame n by converting the frame index to a
// time position, then pauses again. Jumping only works while playing.
void setFrame(int n) {
  mov.play();

  // The duration of a single frame:
  float frameDuration = 1.0 / mov.frameRate;

  // We move to the middle of the frame by adding 0.5:
  float where = (n + 0.5) * frameDuration;

  // Taking into account border effects: if the target lands past the end
  // of the movie, pull it back inside the last frame.
  float diff = mov.duration() - where;
  if (diff < 0) {
    where += diff - 0.25 * frameDuration;
  }

  mov.jump(where);
  mov.pause();
}
// Total number of frames, estimated as duration × frame rate (may be
// slightly off for variable-rate files — see the header comment).
int getLength() {
  return int(mov.duration() * mov.frameRate);
}

Binary file not shown.

View file

@ -0,0 +1,29 @@
/**
* Loop.
*
* Shows how to load and play a QuickTime movie file.
*
*/
import processing.video.*;
Movie movie;
// Opens the window and starts the movie looping; loop() both plays the
// movie and restarts it automatically at the end.
void setup() {
  size(640, 360);
  background(0);

  // Load and play the video in a loop
  movie = new Movie(this, "transit.mov");
  movie.loop();
}
// Library callback: reads each new movie frame as it becomes available.
void movieEvent(Movie m) {
  m.read();
}
// Renders the latest movie frame scaled to the window. Frames are
// delivered asynchronously via movieEvent(), so no polling is needed
// here (removed the commented-out movie.available()/movie.read()
// polling alternative that was left in the original).
void draw() {
  image(movie, 0, 0, width, height);
}

Binary file not shown.

View file

@ -0,0 +1,51 @@
/**
* Pixelate
* by Hernando Barragan.
*
* Load a QuickTime file and display the video signal
* using rectangles as pixels by reading the values stored
* in the current video frame pixels array.
*/
import processing.video.*;
// Grid dimensions in blocks, computed in setup().
int numPixelsWide, numPixelsHigh;
// Edge length of one mosaic block, in pixels.
int blockSize = 10;
Movie mov;
// One sampled color per grid cell, row-major.
// Fix: Java-style array declaration (was the C-style 'color movColors[]').
color[] movColors;
// Opens the window, starts the looping movie, and sizes the color grid:
// one entry per blockSize × blockSize cell of the window.
// Fix: removed the leftover debug 'println(numPixelsWide)'.
void setup() {
  size(640, 360);
  noStroke();
  mov = new Movie(this, "transit.mov");
  mov.loop();

  numPixelsWide = width / blockSize;
  numPixelsHigh = height / blockSize;
  movColors = new color[numPixelsWide * numPixelsHigh];
}
// Display values from movie: sample one color per grid cell whenever a
// new frame is available, then paint the mosaic of rectangles from the
// most recently sampled colors.
void draw() {
  if (mov.available()) {
    mov.read();
    mov.loadPixels();

    // Sample the top-left pixel of each block, row-major.
    int idx = 0;
    for (int row = 0; row < numPixelsHigh; row++) {
      for (int col = 0; col < numPixelsWide; col++) {
        movColors[idx++] = mov.get(col * blockSize, row * blockSize);
      }
    }
  }

  background(255);
  for (int row = 0; row < numPixelsHigh; row++) {
    for (int col = 0; col < numPixelsWide; col++) {
      fill(movColors[row * numPixelsWide + col]);
      rect(col * blockSize, row * blockSize, blockSize, blockSize);
    }
  }
}

Binary file not shown.

View file

@ -0,0 +1,48 @@
/**
* Reverse playback example.
*
 * The Movie.speed() method allows changing the playback speed.
 * Use negative values for backwards playback. Note that not all
 * video formats support backwards playback. This depends on the
 * underlying GStreamer plugins used by gsvideo. For example, the
 * theora codec supports backward playback, but the H264 codec
 * does not, at least in its current version.
*
*/
import processing.video.*;
Movie mov;
// One-shot latch pair: 'once' makes the reverse-speed setup in draw()
// run a single time; 'speedSet' is raised there and cleared again by
// movieEvent() once a frame has actually arrived.
boolean speedSet = false;
boolean once = true;
// Opens the window and starts the movie playing forward; draw() later
// flips it to reverse playback.
// NOTE(review): transit.mkv is presumably encoded with a codec that
// supports backward playback (see the header comment) — confirm.
void setup() {
  size(640, 360);
  background(0);
  mov = new Movie(this, "transit.mkv");
  mov.play();
}
// Library callback: reads each new frame, then lowers the speedSet flag
// once a frame has arrived after the speed change.
// NOTE(review): with 'once' already guarding the setup in draw(), this
// reset looks redundant — confirm before removing.
void movieEvent(Movie m) {
  m.read();
  if (speedSet == true) {
    speedSet = false;
  }
}
// On the first frame, jumps to the end of the movie and switches to
// reverse playback; afterwards just renders the current frame.
void draw() {
  if (speedSet == false && once == true) {
    // Setting the speed should be done only once,
    // this is the reason for the if statement.
    speedSet = true;
    once = false;

    // Start from the end so there is footage to play backwards.
    mov.jump(mov.duration());

    // -1 means backward playback at normal speed.
    mov.speed(-1.0);

    // Set it to play again, since the movie stops
    // playback once it has reached the end.
    mov.play();
  }

  image(mov, 0, 0, width, height);
}

Binary file not shown.

View file

@ -0,0 +1,39 @@
/**
* Scratch
* by Andres Colubri.
*
* Move the cursor horizontally across the screen to set
* the position in the movie file.
*/
import processing.video.*;
Movie mov;
// Opens the window, loads the movie, and leaves it paused on frame 0
// (jumping requires the movie to be in play mode first).
void setup() {
  size(640, 360);
  background(0);
  mov = new Movie(this, "transit.mov");

  // Pausing the video at the first frame.
  mov.play();
  mov.jump(0);
  mov.pause();
}
// Scrubs the movie with the mouse: the horizontal cursor position maps
// linearly onto the movie's timeline. Each available frame is read,
// and the movie is briefly un-paused so jump() takes effect.
void draw() {
  if (mov.available()) {
    mov.read();

    // Map mouseX in [0, width) onto a time position in [0, duration).
    float position = mov.duration() * map(mouseX, 0, width, 0, 1);
    mov.play();
    mov.jump(position);
    mov.pause();
  }

  image(mov, 0, 0);
}

Binary file not shown.

View file

@ -0,0 +1,33 @@
/**
* Speed.
*
* Use the Movie.speed() method to change
* the playback speed.
*
*/
import processing.video.*;
Movie mov;
// Opens the window and starts the movie looping at its normal rate;
// draw() then adjusts the rate every frame.
void setup() {
  size(640, 360);
  background(0);
  mov = new Movie(this, "transit.mov");
  mov.loop();
}
// Library callback: reads each new frame as it becomes available.
// Fix: read from the event's Movie argument rather than the global
// 'mov' — they are the same object here, but using the parameter
// matches the callback contract and the other examples in this set.
void movieEvent(Movie movie) {
  movie.read();
}
// Draws the current frame and sets the playback rate from the mouse:
// far left = 0.1x, far right = 2x. The chosen rate is shown on screen.
void draw() {
  image(mov, 0, 0);

  float rate = map(mouseX, 0, width, 0.1, 2);
  mov.speed(rate);

  fill(255);
  text(nfc(rate, 2) + "X", 10, 30);
}

Binary file not shown.

View file

@ -0,0 +1,11 @@
name=Video
category=Video & Vision
authors=The Processing Foundation
url=http://processing.org/reference/libraries/video/index.html
sentence=GStreamer-based video library for Processing.
paragraph=
version=2
prettyVersion=1.0.1
lastUpdated=0
minRevision=228
maxRevision=0

View file

@ -0,0 +1 @@
name = Video

Binary file not shown.

BIN
lib/video/library/jna.jar Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show more