I was trying to run an OpenCV program that runs a DNN on an Android device, using the OpenCV DNN module with TensorFlow and an SSD. However, I keep receiving this error when I run the program:

CvException - cv::Exception: OpenCV(4.1.0) /build/master_pack-android/opencv/modules/dnn/src/dnn.cpp:692: error: (-215:Assertion failed) inputs.size() == requiredOutputs in function 'getMemoryShapes'

I do not know how to fix this error because, as far as I can tell, my code to access the protobuf (.pb) and protobuf text (.pbtxt) files is correct. My code to read from them also seems correct, and the other OpenCV loaders are in other classes, which seem to work fine. My code that uses the .pb and .pbtxt files is listed below:
public Mat processFrame(Mat inputFrame) {
    final int IN_WIDTH = 300;
    final int IN_HEIGHT = 300;
    final double IN_SCALE_FACTOR = 1;
    final double MEAN_VAL = 0;
    final double THRESHOLD = 0.85;
    // Get a new frame
    Imgproc.cvtColor(inputFrame, inputFrame, Imgproc.COLOR_RGBA2RGB);
    // Forward image through network.
    Mat blob = Dnn.blobFromImage(inputFrame, IN_SCALE_FACTOR, new Size(IN_WIDTH, IN_HEIGHT),
            new Scalar(MEAN_VAL, MEAN_VAL, MEAN_VAL), true, false);
    net.setInput(blob);
    List<List<Double>> blobList = new ArrayList<>();
    Mat detections = net.forward();
    int cols = inputFrame.cols();
    int rows = inputFrame.rows();
    // Each detection row holds [imageId, classId, confidence, left, top, right, bottom].
    detections = detections.reshape(1, (int) detections.total() / 7);
    for (int i = 0; i < detections.rows(); ++i) {
        System.out.println(detections);
        double confidence = detections.get(i, 2)[0];
        if (confidence > THRESHOLD) {
            int classId = (int) detections.get(i, 1)[0];
            int left = (int) (detections.get(i, 3)[0] * cols);
            int top = (int) (detections.get(i, 4)[0] * rows);
            int right = (int) (detections.get(i, 5)[0] * cols);
            int bottom = (int) (detections.get(i, 6)[0] * rows);
            List<Double> list = new ArrayList<>();
            list.add(confidence);
            list.add(Double.valueOf(left));
            list.add(Double.valueOf(top));
            list.add(Double.valueOf(right));
            list.add(Double.valueOf(bottom));
            list.add(Double.valueOf(classId));
            blobList.add(list);
        }
    }
    // Sort by confidence, highest first.
    Collections.sort(blobList, new Comparator<List<Double>>() {
        @Override
        public int compare(List<Double> a, List<Double> b) {
            return a.get(0) > b.get(0) ? 1 : -1;
        }
    });
    Collections.reverse(blobList);
    // Draw at most the top six detections (fewer if fewer were found).
    int maxIndex = blobList.size() > 6 ? 6 : blobList.size();
    int numOfSkystone = 0;
    for (int i = 0; i < maxIndex; i++) {
        List<Double> blobStuff = blobList.get(i);
        String detectedObj = "";
        double v = blobStuff.get(5).doubleValue();
        if (v == 3.0) {
            detectedObj = "Skystone";
            numOfSkystone++;
        } else if (v == 4.0) {
            detectedObj = "Stone";
        } else if (v == 2.0) {
            detectedObj = "Red Foundation";
        } else if (v == 1.0) {
            detectedObj = "Blue Foundation";
        } else {
            detectedObj = "Unknown";
        }
        String label = detectedObj + ": " + blobStuff.get(0);
        int[] baseLine = new int[1];
        Size labelSize = Imgproc.getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);
        // Draw a white background box for the label.
        Imgproc.rectangle(inputFrame,
                new Point(blobStuff.get(1).intValue(), blobStuff.get(2).intValue() - labelSize.height),
                new Point(blobStuff.get(1).intValue() + labelSize.width, blobStuff.get(2).intValue() + baseLine[0]),
                new Scalar(255, 255, 255), Imgproc.FILLED);
        // Write class name and confidence.
        Imgproc.putText(inputFrame, label, new Point(blobStuff.get(1), blobStuff.get(2)),
                FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
    }
    return inputFrame;
}
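For context, the network itself is loaded in another class; it boils down to something like this (the paths here are placeholders for my real ones):

import org.opencv.dnn.Dnn;
import org.opencv.dnn.Net;

public class DetectorLoader {
    // Placeholder paths; the real ones point at my frozen graph and the generated .pbtxt.
    public static Net load() {
        return Dnn.readNetFromTensorflow(
                "/sdcard/FIRST/frozen_inference_graph.pb",
                "/sdcard/FIRST/ssd_graph.pbtxt");
    }
}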
As far as I know, that code is correct, but I feel the error may be within the .pbtxt file; I have no clue how to fix it, though. The .pbtxt had to be attached as a link because it is very long (close to 5,000 lines); I ran the tf_graph_ssd.py file to generate it. All of its lines seem correct too, though. Can anyone help me understand what is going wrong with it?
PB file, Pbtxt, config: https://drive.google.com/drive/folders/19g7xnC9ekjUHeFNtgcvSn2J1ioWmrb_L
It turned out that extraneous lines were generated when producing the .pbtxt file. Deleting the first 40 lines ended up solving the problem.
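In case anyone wants to trim the file programmatically rather than by hand, here is a quick sketch (the file names are placeholders; adjust the 40 to wherever the extraneous lines end in your file):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class TrimPbtxt {
    public static void main(String[] args) throws IOException {
        Path in = Paths.get("ssd_graph.pbtxt");        // hypothetical input name
        Path out = Paths.get("ssd_graph_fixed.pbtxt"); // hypothetical output name
        List<String> lines = Files.readAllLines(in);
        // Drop the extraneous leading lines (40 in this case).
        Files.write(out, lines.subList(40, lines.size()));
    }
}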
I am trying to use Mask_RCNN on Android, so I have written code for it. But the Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH) function is not able to open the model weights and text graph files. It seems the app cannot find the path of the stored files.
I have placed these files (the text graph and the model weights) in the java folder where MainActivity.java is present, but it shows the same error as mentioned below.
I also tried adding these files to the res folder and the assets folder, but could not pass the model weight path to Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH) because its arguments are of String type, while the AssetManager returns an InputStream.
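One workaround I have seen suggested is to copy the assets into internal storage first and pass that absolute path, since readNetFromTensorflow only accepts file paths; something like this (I have not verified it, and the file names are mine):

import android.content.Context;
import java.io.*;

public class AssetUtils {
    // Copies an asset into internal storage and returns an absolute path
    // that can be passed to Dnn.readNetFromTensorflow(String, String).
    public static String assetToFilePath(Context context, String assetName) throws IOException {
        File outFile = new File(context.getFilesDir(), assetName);
        try (InputStream in = context.getAssets().open(assetName);
             OutputStream out = new FileOutputStream(outFile)) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
        }
        return outFile.getAbsolutePath();
    }
}
// Usage:
// String pb = AssetUtils.assetToFilePath(this, "frozen_inference_graph.pb");
// String pbtxt = AssetUtils.assetToFilePath(this, "mask_rcnn_inception_v2_coco_2018_01_28.pbtxt");
// Net net = Dnn.readNetFromTensorflow(pb, pbtxt);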
I am using OpenCV 3.4.10 and Android Studio 4.0.
Please also help me with drawing contours over the image in Java. If anyone knows how to run this on Android using the model in C++, please suggest that too; I have already tried that as well, but got a build error of undefined reference to 'cv::dnn::experimental_dnn_v4::Net::~Net()'.
Any help would be appreciated. Thanks in advance.
On running the app, it gives this exception:
Caused by: CvException [org.opencv.core.CvException: cv::Exception: OpenCV(3.4.10) /build/3_4_pack-android/opencv/modules/dnn/src/caffe/caffe_io.cpp:1132: error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open "./frozen_inference_graph.pb" in function 'bool cv::dnn::ReadProtoFromBinaryFile(const char*, google::protobuf::Message*)']
    at org.opencv.dnn.Dnn.readNetFromTensorflow_0(Native Method)
    at org.opencv.dnn.Dnn.readNetFromTensorflow(Dnn.java:659)
    at com.example.imagecompressor.MainActivity.onActivityResult(MainActivity.java:189)
    at android.app.Activity.dispatchActivityResult(Activity.java:7454)
    at android.app.ActivityThread.deliverResults(ActivityThread.java:4353)

(The error is thrown at this line: Net net = Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH);)
Source code is given below:
MainActivity.java
final String TEXT_GRAPH = "./mask_rcnn_inception_v2_coco_2018_01_28.pbtxt";
final String MODEL_WEIGHTS = "./frozen_inference_graph.pb";
final String CLASSES_FILE = "./mscoco_labels";

Mat tmp = new Mat(bitmap.getHeight(), bitmap.getWidth(), CV_8UC1); // Mat takes (rows, cols, type)
Utils.bitmapToMat(bitmap, tmp);
Mat image = Imgcodecs.imread(img_path);
image = tmp; // the imread result above is discarded and replaced by the bitmap Mat
Size size = image.size();
int cols = image.cols();
int rows = image.rows();
double h = size.height;
double w = size.width;
int hh = (int) size.height;
int ww = (int) size.width;
if (!image.empty()) {
    Mat blob = Dnn.blobFromImage(image, 1.0, new Size(w, h), new Scalar(0), true, false);
    // Load the network
    Net net = Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH);
    net.setPreferableBackend(Dnn.DNN_BACKEND_OPENCV);
    net.setPreferableTarget(Dnn.DNN_TARGET_CPU);
    net.setInput(blob);
    ArrayList<String> outputlayers = new ArrayList<String>();
    ArrayList<Mat> outputMats = new ArrayList<Mat>();
    outputlayers.add("detection_out_final");
    outputlayers.add("detection_masks");
    net.forward(outputMats, outputlayers);
    Mat numClasses = outputMats.get(0);
    Mat numMasks = outputMats.get(1);
    // Each row of the first output is [imageId, classId, confidence, left, top, right, bottom].
    numClasses = numClasses.reshape(1, (int) numClasses.total() / 7);
    for (int i = 0; i < numClasses.rows(); ++i) {
        double confidence = numClasses.get(i, 2)[0];
        //System.out.println(confidence);
        // Mat objectMask=outputMats.get(i);
        if (confidence > 0.5) {
            int classId = (int) numClasses.get(i, 1)[0];
            String label = classes.get(classId) + ": " + confidence;
            System.out.println(label);
            int left = (int) (numClasses.get(i, 3)[0] * cols);
            int top = (int) (numClasses.get(i, 4)[0] * rows);
            int right = (int) (numClasses.get(i, 5)[0] * cols);
            int bottom = (int) (numClasses.get(i, 6)[0] * rows);
            System.out.println(left + " " + top + " " + right + " " + bottom);
            left = max(0, min(left, cols - 1));
            top = max(0, min(top, rows - 1));
            right = max(0, min(right, cols - 1));
            bottom = max(0, min(bottom, rows - 1));
            final Rect box = new Rect(left, top, right - left + 1, bottom - top + 1);
            //Mat objectMask(numMasks.rows(), numMasks.size[3],CV_32F, numMasks.ptr<float>(i,classId));
            // Mat obj();
            Mat objectMask = new Mat(numMasks.rows(), numMasks.cols(), CV_32F);
            rectangle(image, new Point(box.x, box.y), new Point(box.x + box.width, box.y + box.height),
                    new Scalar(255, 178, 50), 3);
            /* String lab = format("%.2f", confidence);
            if (!classes.isEmpty()){
                //CV_Assert(classId < (int)classes.size());
                if(classId<(int)classes.size()) {
                    lab = classes.get(classId) + ":" + lab;
                }
            }*/
            Scalar color = new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256));
            double maskThreshold = 0.3;
            // Resize the mask, threshold, color and apply it on the image
            resize(objectMask, objectMask, new Size(box.width, box.height));
            Imgproc.threshold(objectMask, objectMask, 255 * maskThreshold, 255, Imgproc.THRESH_BINARY);
            // Mat mask = (objectMask > maskThreshold);
            Mat ili = new Mat();
            multiply(image, new Scalar(0.7), ili);
            Mat coloredRoi = new Mat();
            add(ili, new Scalar(0.3).mul(color), coloredRoi);
            coloredRoi.convertTo(coloredRoi, CV_8UC3);
            List<MatOfPoint> contours = new ArrayList<>(); // must be non-null, or findContours throws
            Mat hierarchy = new Mat();
            objectMask.convertTo(objectMask, CV_8U);
            findContours(objectMask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
            drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
            coloredRoi.copyTo(image, objectMask);
        }
        Mat detectedFrame = new Mat();
        image.convertTo(detectedFrame, CV_8U);
        Imgcodecs.imwrite("outputFile.jpg", detectedFrame);
    }
}
I'm working on a project which reads an image as input and shows an output image. The output image contains some lines to indicate the human body skeleton. I'm using the pose estimation model from TensorFlow Lite:
https://www.tensorflow.org/lite/models/pose_estimation/overview
I have read the docs, and they show that the output contains a 4-dimensional array. I tried using netron to visualize my model file, and it looks like this:
I succeeded in getting the result heatmaps from the input, but I ran into a problem: all the floats are negative. This confuses me, and I'm not sure whether I did something wrong or how to interpret these outputs.
Here's the code for the output
tfLite = new Interpreter(loadModelFile());
Bitmap inputPhoto = BitmapFactory.decodeResource(getResources(), R.drawable.human2);
inputPhoto = Bitmap.createScaledBitmap(inputPhoto, INPUT_SIZE_X, INPUT_SIZE_Y, false);
inputPhoto = inputPhoto.copy(Bitmap.Config.ARGB_8888, true);

int pixels[] = new int[INPUT_SIZE_X * INPUT_SIZE_Y];
inputPhoto.getPixels(pixels, 0, INPUT_SIZE_X, 0, 0, INPUT_SIZE_X, INPUT_SIZE_Y);

// RGB input tensor: [batch][x][y][channel]
float inputData[][][][] = new float[1][INPUT_SIZE_X][INPUT_SIZE_Y][3];
int pixelsIndex = 0;
for (int i = 0; i < INPUT_SIZE_X; i++) {
    for (int j = 0; j < INPUT_SIZE_Y; j++) {
        // Unpack each ARGB pixel into separate R, G, B channels.
        int p = pixels[pixelsIndex];
        inputData[0][i][j][0] = (p >> 16) & 0xff; // R
        inputData[0][i][j][1] = (p >> 8) & 0xff;  // G
        inputData[0][i][j][2] = (p) & 0xff;       // B
        pixelsIndex++;
    }
}

float outputData[][][][] = new float[1][23][17][17];
tfLite.run(inputData, outputData);
The output is a [1][23][17][17] array, and it is all negative. Is there anyone who knows about this and can help me? :(
Thanks a lot!
This post became active today, so I'm posting a late answer; sorry about that.
You should check the Posenet.kt file. There you can see very thoroughly documented code. For example, this:
/**
 * Initializes an outputMap of 1 * x * y * z FloatArrays for the model processing to populate.
 */
private fun initOutputMap(interpreter: Interpreter): HashMap<Int, Any> {
    val outputMap = HashMap<Int, Any>()

    // 1 * 9 * 9 * 17 contains heatmaps
    val heatmapsShape = interpreter.getOutputTensor(0).shape()
    outputMap[0] = Array(heatmapsShape[0]) {
        Array(heatmapsShape[1]) {
            Array(heatmapsShape[2]) { FloatArray(heatmapsShape[3]) }
        }
    }

    // 1 * 9 * 9 * 34 contains offsets
    val offsetsShape = interpreter.getOutputTensor(1).shape()
    outputMap[1] = Array(offsetsShape[0]) {
        Array(offsetsShape[1]) { Array(offsetsShape[2]) { FloatArray(offsetsShape[3]) } }
    }

    // 1 * 9 * 9 * 32 contains forward displacements
    val displacementsFwdShape = interpreter.getOutputTensor(2).shape()
    outputMap[2] = Array(offsetsShape[0]) {
        Array(displacementsFwdShape[1]) {
            Array(displacementsFwdShape[2]) { FloatArray(displacementsFwdShape[3]) }
        }
    }

    // 1 * 9 * 9 * 32 contains backward displacements
    val displacementsBwdShape = interpreter.getOutputTensor(3).shape()
    outputMap[3] = Array(displacementsBwdShape[0]) {
        Array(displacementsBwdShape[1]) {
            Array(displacementsBwdShape[2]) { FloatArray(displacementsBwdShape[3]) }
        }
    }

    return outputMap
}
and of course how the output is transformed to points on screen:
/**
* Estimates the pose for a single person.
* args:
* bitmap: image bitmap of frame that should be processed
* returns:
* person: a Person object containing data about keypoint locations and confidence scores
*/
fun estimateSinglePose(bitmap: Bitmap): Person {
val estimationStartTimeNanos = SystemClock.elapsedRealtimeNanos()
val inputArray = arrayOf(initInputArray(bitmap))
Log.i(
"posenet",
String.format(
"Scaling to [-1,1] took %.2f ms",
1.0f * (SystemClock.elapsedRealtimeNanos() - estimationStartTimeNanos) / 1_000_000
)
)
val outputMap = initOutputMap(getInterpreter())
val inferenceStartTimeNanos = SystemClock.elapsedRealtimeNanos()
getInterpreter().runForMultipleInputsOutputs(inputArray, outputMap)
lastInferenceTimeNanos = SystemClock.elapsedRealtimeNanos() - inferenceStartTimeNanos
Log.i(
"posenet",
String.format("Interpreter took %.2f ms", 1.0f * lastInferenceTimeNanos / 1_000_000)
)
val heatmaps = outputMap[0] as Array<Array<Array<FloatArray>>>
val offsets = outputMap[1] as Array<Array<Array<FloatArray>>>
val height = heatmaps[0].size
val width = heatmaps[0][0].size
val numKeypoints = heatmaps[0][0][0].size
// Finds the (row, col) locations of where the keypoints are most likely to be.
val keypointPositions = Array(numKeypoints) { Pair(0, 0) }
for (keypoint in 0 until numKeypoints) {
var maxVal = heatmaps[0][0][0][keypoint]
var maxRow = 0
var maxCol = 0
for (row in 0 until height) {
for (col in 0 until width) {
if (heatmaps[0][row][col][keypoint] > maxVal) {
maxVal = heatmaps[0][row][col][keypoint]
maxRow = row
maxCol = col
}
}
}
keypointPositions[keypoint] = Pair(maxRow, maxCol)
}
// Calculating the x and y coordinates of the keypoints with offset adjustment.
val xCoords = IntArray(numKeypoints)
val yCoords = IntArray(numKeypoints)
val confidenceScores = FloatArray(numKeypoints)
keypointPositions.forEachIndexed { idx, position ->
val positionY = keypointPositions[idx].first
val positionX = keypointPositions[idx].second
yCoords[idx] = (
position.first / (height - 1).toFloat() * bitmap.height +
offsets[0][positionY][positionX][idx]
).toInt()
xCoords[idx] = (
    position.second / (width - 1).toFloat() * bitmap.width +
        offsets[0][positionY][positionX][idx + numKeypoints]
    ).toInt()
confidenceScores[idx] = sigmoid(heatmaps[0][positionY][positionX][idx])
}
val person = Person()
val keypointList = Array(numKeypoints) { KeyPoint() }
var totalScore = 0.0f
enumValues<BodyPart>().forEachIndexed { idx, it ->
keypointList[idx].bodyPart = it
keypointList[idx].position.x = xCoords[idx]
keypointList[idx].position.y = yCoords[idx]
keypointList[idx].score = confidenceScores[idx]
totalScore += confidenceScores[idx]
}
person.keyPoints = keypointList.toList()
person.score = totalScore / numKeypoints
return person
}
The whole .kt file is the heart of bitmap to points on screen!
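One more note: the raw heatmap values are unbounded logits, which is why they can all be negative; they only become [0, 1] confidence scores after the sigmoid() call you can see in estimateSinglePose above. In Java, that same step would look like this:

// Turns a raw heatmap logit into a [0, 1] confidence score,
// mirroring the sigmoid() used by Posenet.kt above.
static float sigmoid(float logit) {
    return (float) (1.0 / (1.0 + Math.exp(-logit)));
}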
If you need anything else tag me.
Happy coding
I'm creating a program that reads a text file with a series of x/y coordinates and places a small square where each of them should go. The program also has to handle another text file which has a series of x/y coordinates plus a double value representing a signal strength in decibels. When I launch the program, it displays a black screen with a small white rectangle in the top-left corner. What is wrong with my code? I'm not getting any errors in the console. The problem is almost certainly in my coveRage.java file, in the second main for loop.
First text file:
500.0 500.0
250.0 250.0
Second text file:
1000.0 2500.0 -143.2
1213.0 2132.0 -100.7
Main.java
public class Main {
    public static void main(String[] args) throws FileNotFoundException {
        File towers = new File("towers.txt");
        File readings = new File("readings.txt");
        //System.out.println(new File(".").getAbsoluteFile());
        Scanner towers1 = new Scanner(towers);
        Scanner readings1 = new Scanner(readings);
        ArrayList<Integer> towerPos = new ArrayList<Integer>();
        ArrayList<Integer> readingPos = new ArrayList<Integer>();
        while (towers1.hasNextDouble()) {
            towerPos.add((int) towers1.nextDouble());
        }
        towers1.close();
        while (readings1.hasNextDouble()) {
            readingPos.add((int) readings1.nextDouble());
        }
        readings1.close();
        JFrame f = new JFrame("Cellphone Coverage");
        f.setVisible(true);
        f.setSize(500, 500);
        f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        f.add(new CoveRage(towerPos, readingPos));
    }
}
coveRage.java
public class CoveRage extends JComponent {
    private ArrayList<Integer> readingPos;
    private ArrayList<Integer> towerPos;
    ArrayList<Integer> towerPosis = new ArrayList<Integer>();
    ArrayList<Integer> distances = new ArrayList<Integer>();
    int xAxis;
    int yAxis;

    public CoveRage(ArrayList<Integer> towerPos, ArrayList<Integer> readingPos) {
        this.towerPos = towerPos;
        this.readingPos = readingPos;
    }

    public void paintComponent(Graphics g) {
        Graphics2D g2 = (Graphics2D) g.create();
        for (int j = 0; j < towerPos.size(); j += 2) {
            int xAxis = towerPos.get(j) / 10;
            int yAxis = towerPos.get(j + 1) / 10;
            towerPosis.add(xAxis); // adds to list for checking distance between tower and signal
            towerPosis.add(yAxis);
            g2.setColor(Color.black);
            g2.fillRect(xAxis, yAxis, 5, 5);
        }
        for (int i = 0; i < readingPos.size(); i =+ 3) { // while there are still readings left, take in 3 values and repeat
            int xAxiss = readingPos.get(i) / 10; // grabs x axis of reading
            int yAxiss = readingPos.get(i + 1) / 10; // grabs y axis of reading
            int sigNal = readingPos.get(i + 2); // grabs signal strength of reading
            for (int k = 0; k < towerPosis.size(); k =+ 2) { // while there are still readings in towerPosis
                int distance = (int) Math.sqrt(Math.pow(towerPosis.get(k) - xAxiss, 2) + (Math.pow(towerPosis.get(k + 1) - yAxiss, 2))); // calculates distance between tower and reading
                distances.add(distance); // add distance to arrayList
                int leastDist = distances.get(0);
                for (int u = 0; u < distances.size(); u++) { // while there are still distances
                    if (distances.get(u) < leastDist) {
                        leastDist = distances.get(u);
                    }
                    int expected = (int) ((int) 40 * (Math.log10(1.0 / leastDist)));
                    if (sigNal >= expected) {
                        g2.setColor(Color.green);
                        g2.fillRect(xAxiss, yAxiss, 5, 5);
                    } else if (sigNal <= expected - 9) {
                        g2.setColor(Color.red);
                        g2.fillRect(xAxiss, yAxiss, 5, 5);
                    }
                }
            }
        }
    }
}
You have a (number of) infinite loops caused by k =+ 2, which is the same as saying k = +2, i.e. k = 2, so k never increases in size (the outer loop has the same problem with i =+ 3).
If you change all the =+ to += it will work just fine.
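Concretely, the two broken loop headers in coveRage.java become:

for (int i = 0; i < readingPos.size(); i += 3) { // advance three values (x, y, signal) per reading
    ...
    for (int k = 0; k < towerPosis.size(); k += 2) { // advance two values (x, y) per tower
        ...
    }
}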
I'm currently in the process of converting Java to JavaScript and need to change the colour of some images.
Right now each image is loaded within an Image class; an image looks like this:
It's a PNG which works as a character set; the data sent through is mapped to each character in the image.
The existing Java code looks like this:
class VDColorFilter extends RGBImageFilter {
    int fg;
    int bg;
    final int[] colors;

    public VDColorFilter(final int fgc, final int bgc) {
        super();
        // Palette: black, red, green, yellow, blue, magenta, cyan, white
        // (0x000000, 0xFF0000, 0x00FF00, 0xFFFF00, 0x0000FF, 0xFF00FF, 0x00FFFF, 0xFFFFFF).
        this.colors = new int[] { 0, 16711680, 65280, 16776960, 255, 16711935, 65535, 16777215 };
        this.fg = fgc;
        this.bg = bgc;
        this.canFilterIndexColorModel = true;
    }

    public int filterRGB(final int x, final int y, int rgb) {
        if (rgb == -1) { // -1 == 0xFFFFFFFF, i.e. opaque white
            rgb = (0xFF000000 | this.colors[this.bg]);
        } else if (rgb == -16777216) { // -16777216 == 0xFF000000, i.e. opaque black
            rgb = (0xFF000000 | this.colors[this.fg]);
        }
        return rgb;
    }
}
I want to be able to do the same thing to my images, but in JavaScript. I don't have much experience with Java, so I'm unsure how filterRGB actually applies the RGB result against the colors array.
Of course, this is only tinting the black of the image, not the white.
Are there any libraries out there which mimic this? If not, what is my best way of achieving the same result?
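For reference, on the Java side that filter gets applied pixel by pixel: filterRGB receives each pixel and returns its replacement, so opaque white (-1) becomes the bg palette entry and opaque black (-16777216) becomes the fg entry. The filter is hooked up through a FilteredImageSource; a minimal sketch (the FilterDemo class name is illustrative):

import java.awt.Image;
import java.awt.Toolkit;
import java.awt.image.FilteredImageSource;

class FilterDemo {
    // Recolors 'source' by running every pixel through VDColorFilter.filterRGB.
    static Image recolor(Image source, int fg, int bg) {
        return Toolkit.getDefaultToolkit().createImage(
                new FilteredImageSource(source.getSource(), new VDColorFilter(fg, bg)));
    }
}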
You can filter an image using getImageData() and putImageData(). This requires cross-origin resource sharing (CORS) to be satisfied, e.g. the image comes from the same server as the page (a security mechanism in the browser).
If that part is OK, let's do an example using your image.
The best would be if your images had an alpha channel instead of white background. This would allow you to use composite operators to change the colors directly without having to parse the pixels.
You can do this two ways:
Punch out the background once and for all, then use composite operator (recommended)
Replace all black pixels with the color
With the first approach you only have to parse the pixels once. Every time you need to change the colors just use a composite operator (see demo 2 below).
Using Composite Operator
Here is a way to punch out the background first. We will be using an unsigned 32-bit buffer for this, as it is faster than using a byte array.
We can convert the byte-buffer by using the view's buffer and create a second view for it:
var data32 = new Uint32Array(idata.data.buffer);
See code below for details:
var img = new Image();
img.crossOrigin = "";
img.onload = punchOut;
img.src = "//i.imgur.com/8NWz72w.png";
function punchOut() {
    var canvas = document.createElement("canvas"),
        ctx = canvas.getContext("2d");
    document.body.appendChild(this);
    document.body.appendChild(canvas);
    // set canvas size = image size
    canvas.width = this.naturalWidth;
    canvas.height = this.naturalHeight;
    // draw in image
    ctx.drawImage(this, 0, 0);
    // get pixel data
    var idata = ctx.getImageData(0, 0, canvas.width, canvas.height),
        data32 = new Uint32Array(idata.data.buffer), // create a uint32 buffer
        i = 0, len = data32.length;
    while (i < len) {
        if (data32[i] !== 0xff000000) data32[i] = 0; // if not black, set transparent
        i++;
    }
    ctx.putImageData(idata, 0, 0); // put pixels back on canvas
}
body {background:#aaa}
Now that we have a transparent image we can use composite modes to alter its colors. The mode we need to use is "source-atop":
var img = new Image();
img.crossOrigin = ""; img.onload = punchOut;
img.src = "//i.imgur.com/8NWz72w.png";
function punchOut() {
var canvas = document.querySelector("canvas"), ctx = canvas.getContext("2d");
canvas.width = this.naturalWidth;
canvas.height = this.naturalHeight;
ctx.drawImage(this, 0, 0);
var idata = ctx.getImageData(0, 0, canvas.width, canvas.height),
data32 = new Uint32Array(idata.data.buffer), i = 0, len = data32.length;
while(i < len) {if (data32[i] !== 0xff000000) data32[i] = 0; i++}
ctx.putImageData(idata, 0, 0);
// NEW PART --------------- (see previous demo for detail of the code above)
// alter color using composite mode
// This will replace existing non-transparent pixels with the next drawn object
ctx.globalCompositeOperation = "source-atop";
function setColor() {
for (var y = 0; y < 16; y++) {
for (var x = 0; x < 6; x++) {
var cw = (canvas.width - 1) / 6,
ch = (canvas.height - 1) / 16,
cx = cw * x,
cy = ch * y;
// set the desired color using fillStyle, here: using HSL just to make cycle
ctx.fillStyle = "hsl(" + (Math.random() * 360) + ", 100%, 80%)";
// fill the area with the new color, due to comp. mode only existing pixels
// will be changed
ctx.fillRect(cx+1, cy+1, cw-1, ch-1);
}
}
}
setInterval(setColor, 100);
// to reset comp. mode, use:
//ctx.globalCompositeOperation = "source-over";
}
body {background:#333}
<canvas></canvas>
And finally, use drawImage() to pick each letter based on mapping and cell calculations for each char (see for example the previous answer I gave you for drawImage usage).
Define a char map using a string
Find the letter using the map and indexOf()
Calculate the index of the map to x and y in the image
Use drawImage() to draw that letter to the x/y position in the output canvas
Random letters
var img = new Image();
img.crossOrigin = ""; img.onload = punchOut;
img.src = "http://i.imgur.com/8NWz72w.png";
function punchOut() {
var canvas = document.querySelector("canvas"), ctx = canvas.getContext("2d");
canvas.width = this.naturalWidth;
canvas.height = this.naturalHeight;
ctx.drawImage(this, 0, 0);
var idata = ctx.getImageData(0, 0, canvas.width, canvas.height),
data32 = new Uint32Array(idata.data.buffer), i = 0, len = data32.length;
while(i < len) {if (data32[i] !== 0xff000000) data32[i] = 0; i++}
ctx.putImageData(idata, 0, 0);
ctx.globalCompositeOperation = "source-atop";
function setColor() {
for (var y = 0; y < 16; y++) {
for (var x = 0; x < 6; x++) {
var cw = (canvas.width - 1) / 6,
ch = (canvas.height - 1) / 16,
cx = cw * x,
cy = ch * y;
ctx.fillStyle = "hsl(" + (Math.random() * 360) + ", 100%, 80%)";
ctx.fillRect(cx+1, cy+1, cw-1, ch-1);
}
}
}
setColor();
// NEW PART --------------- (see previous demo for detail of the code above)
var dcanvas = document.createElement("canvas"), xpos = 0;
ctx = dcanvas.getContext("2d");
document.body.appendChild(dcanvas);
for(var i = 0; i < 16; i++) {
var cw = (canvas.width - 1) / 6,
ch = (canvas.height - 1) / 16,
cx = cw * ((Math.random() * 6)|0), // random x
cy = ch * ((Math.random() * 16)|0); // random y
ctx.drawImage(canvas, cx+1, cy+1, cw-1, ch-1, xpos, 0, cw-1, ch-1);
xpos += 16;
}
}
body {background:#333}
<canvas></canvas>
I am fairly new to using Processing and OpenCV, so please bear with me. I have posted my question both on StackOverflow and on the Processing forum to reach a wider audience and find a solution, so apologies if this looks like careless cross-posting; that is not my intention.
I have been working on a light installation using Arduino and Processing. I am capturing people using face detection with a webcam and OpenCV, and translating that into a light installation driven by a 16x32 LED matrix panel. The installation responds to the number of spectators viewing the lights while being recorded: the idea is that more people being recorded generates a greater display of flickering lights, and when no one is viewing/being recorded, the lights do not flicker.
I have managed to get OpenCV to detect faces and output to Arduino to display on the lights. However, I can only get the lights to flash more intensely when people are not being detected, rather than when they are. The lights therefore work in reverse: they flicker more when no one is detected, and less when people are detected.
I have attached both my Processing and Arduino code for reference, although I do not think the Arduino code needs to change at all.
(I have marked an area in the Processing code which I believe is causing the issue.)
I would really appreciate it if someone knows a way of changing this so the face detection triggers more lights.
Thanks
Processing Code
import gab.opencv.*;
import java.awt.*;
import processing.video.*;
import processing.serial.*;
import java.awt.Color;
Capture video;
OpenCV opencv;
Serial myPort; // Create object from Serial class
int inByte = -1;
void setup() {
    size(640, 480);
    video = new Capture(this, 640/2, 480/2);
    opencv = new OpenCV(this, 640/2, 480/2);
    opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
    video.start();
    println(Serial.list());
    //colorMode(HSB, 100,100,100);
    String portName = Serial.list()[5];
    myPort = new Serial(this, Serial.list()[5], 9600);
}

void draw() {
    scale(2);
    opencv.loadImage(video);
    image(video, 0, 0);
    noFill();
    stroke(0, 255, 0);
    strokeWeight(3);
    Rectangle[] faces = opencv.detect();
    println(faces.length);
    // do cool stuff here:
    int x = int(random(32));
    int y = int(random(16));
    int H = int(222);
    int S = int(5);
    int L = int(random(3));
    int R = int(random(255));
    int G = int(random(255));
    int B = int(random(255));
    int F = 0;
    String toard = x + ":" + y + ":" + R + ":" + G + ":" + B + ":" + F + ".";
    myPort.write(toard);
    // --- marked: I believe this block is causing the issue ---
    for (int i = 0; i < faces.length; i++) {
        rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
        toard = x + ":" + y + ":" + 0 + ":" + 0 + ":" + 0 + ".";
        myPort.write(toard);
        if (faces.length == 0) {
            delay(faces[0].x);
        }
    }
    // --- end of marked block ---
    // listen back to the serial data from arduino
    // this is handy for debugging
    while (myPort.available() > 0) {
        // send the string to the arduino over serial port
        inByte = myPort.read();
    }
}

void captureEvent(Capture c) {
    c.read();
}
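For what it's worth, the marked block sends one unconditional random colored pixel per frame (the flicker) and then, for every detected face, sends a black pixel, which blanks lights exactly when faces are present; that would explain the reversed behavior. Below is a sketch of one way the draw() loop could flip the logic, keeping the same x:y:R:G:B:F serial protocol the Arduino parses (untested; the blanking strategy is an assumption):

void draw() {
    scale(2);
    opencv.loadImage(video);
    image(video, 0, 0);
    noFill();
    stroke(0, 255, 0);
    strokeWeight(3);
    Rectangle[] faces = opencv.detect();

    if (faces.length > 0) {
        // One random bright pixel per detected face: more faces, more flicker.
        for (int i = 0; i < faces.length; i++) {
            rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
            String toard = int(random(32)) + ":" + int(random(16)) + ":"
                + int(random(255)) + ":" + int(random(255)) + ":" + int(random(255)) + ":0.";
            myPort.write(toard);
        }
    } else {
        // No faces: blank a random pixel instead of lighting one.
        myPort.write(int(random(32)) + ":" + int(random(16)) + ":0:0:0:0.");
    }

    while (myPort.available() > 0) {
        inByte = myPort.read();
    }
}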
Arduino code
const char EOPmarker = '.';
char serialbuf[32];
#include <Adafruit_GFX.h>
#include <RGBmatrixPanel.h> // Hardware-specific library
#define MAX_STRING_LEN 20
#include <string.h>
#define CLK 8 // MUST be on PORTB! (Use pin 11 on Mega)
#define LAT A3
#define OE 9
#define A A0
#define B A1
#define C A2
RGBmatrixPanel matrix(A, B, C, CLK, LAT, OE, false);
uint8_t r=7, g=7, b=7;
void setup() {
    Serial.begin(9600);
    matrix.begin();
    pinMode(13, OUTPUT);
    digitalWrite(13, LOW);
}

void loop() {
    if (Serial.available() > 0) {
        static int bufpos = 0;
        char inchar = Serial.read();
        if (inchar != EOPmarker) {
            serialbuf[bufpos] = inchar;
            bufpos++;
        }
        else {
            serialbuf[bufpos] = 0;
            bufpos = 0;
        }
        // this is where we grab the x y HSB values and do whatever we think is nice :)
        // send back to processing for debugging
        int x = atoi(subStr(serialbuf, ":", 1));
        int y = atoi(subStr(serialbuf, ":", 2));
        int R = atoi(subStr(serialbuf, ":", 3));
        int G = atoi(subStr(serialbuf, ":", 4));
        int B = atoi(subStr(serialbuf, ":", 5));
        int F = atoi(subStr(serialbuf, ":", 6));
        float vR = map(R, 0, 255, 0, 7);
        float vG = map(G, 0, 255, 0, 7);
        float vB = map(B, 0, 255, 0, 7);
        Serial.write(x);
        matrix.drawPixel(x, y, matrix.Color333(vR, vG, vB));
    }
}
// this is the function that allows us to easily grab an item from the string by index
char* subStr(char* input_string, char *separator, int segment_number) {
    char *act, *sub, *ptr;
    static char copy[MAX_STRING_LEN];
    int i;
    strcpy(copy, input_string);
    for (i = 1, act = copy; i <= segment_number; i++, act = NULL) {
        sub = strtok_r(act, separator, &ptr);
        if (sub == NULL) break;
    }
    return sub;
}