Hey guys, I switched from Swift to Kotlin a few days ago, and I'm trying to implement in Kotlin a function similar to one I have in Swift, and failing.
Here is what I'm doing in Swift:
private var issues: [PathPmCore.Issue] = [] {
    didSet {
        AppState.shared.issues = issues
        taskModels.removeAll()
        var models = [ListModel]()
        var components = [Double]()
        for status in TaskState.allCases {
            let count = issues.filter({ issue in
                status.ids.contains(where: { statusId in
                    statusId == issue.status.currentStatus.id
                })
            }).count
            models.append(.init(activeTasks: .constant(count), circleState: .constant(status)))
            components.append(Double(count))
        }
        DispatchQueue.main.async {
            self.taskModels = models
            self.chartComponents = components
        }
    }
}
The way I approached it in Kotlin is similar:
private var issues: List<Issues> = emptyList()
    set(value: List<Issues>) {
        for (status in TaskState.values()) {
            val models: ArrayList<ListModel> = arrayListOf<ListModel>()
            val components = listOf<Double>()
            val count = issues.filterNotNull().forEach { issue ->
                status.ids.find { statusId ->
                    statusId == issue.status.currentStatus.id
                }
            }
            println(count)
            value.count()
        }
    }
It runs with no errors, but the setter in Kotlin leaves the list empty.
Why it's empty
It's empty because you are not setting the backing field!
Possible Solution (maybe)
So, if I understood correctly what you are trying to achieve, here is a possible solution to your problem:
private var issues: List<Issues> = emptyList()
    set(value: List<Issues>) {
        // Inside a custom setter you must read the incoming "value" and
        // assign the backing "field"; reading "issues" here would return
        // the old value.
        field = TaskState.values().map { status ->
            value.filterNotNull().filter { issue ->
                status.ids.any { statusId ->
                    statusId == issue.status.currentStatus.id
                }
            }
        }.flatten()
    }
I removed models and components because you are not using them.
I am trying to convert Java code into Swift. I have gotten quite far but am stuck on custom comparator logic. I am wondering how to achieve that in Swift.
class CustomItem {
    var start: Int
    var end: Int

    init(start: Int, end: Int) {
        self.start = start
        self.end = end
    }

    static func compare_customs(_ content: [UInt8], _ custom1: CustomItem, _ custom2: CustomItem) -> Int {
        // the logic is implemented here already!
    }
}
var content: [UInt8] = output.getBuffer()
var items = [CustomItem]()
for i in 0..<offsets.count - 1 {
    items.append(CustomItem(start: offsets[i], end: offsets[i + 1]))
}
items.append(CustomItem(start: offsets[offsets.count - 1], end: output.size()))
// Swift custom comparator logic here??
Trying to convert the following Java custom comparator logic into Swift. Any guidance would be appreciated.
byte[] content = output.getBuffer();
java.util.Arrays.sort(items, new java.util.Comparator<CustomItem>() {
    @Override
    public int compare(CustomItem custom1, CustomItem custom2) {
        return CustomItem.compare_customs(content, custom1, custom2);
    }
});
It would be something like:
items.sort { CustomItem.compare_customs(content, $0, $1) < 0 }
Swift's sort(by:) takes a Bool predicate rather than a three-way comparator, so you compare the comparator's result against 0 (use sorted(by:) instead if you want a new array).
How can I frame a Flow<ByteString, ByteString, NotUsed> by size? All the examples I have found assume there is some delimiter, which is not my case; I just need to frame by length/size.
Framing via Framing.delimiter requires a designated delimiter, and there doesn't seem to be any built-in stream operator that does framing simply by a fixed chunk size. One of the challenges in a custom framing/chunking solution is properly handling the last chunk of elements.
One solution would be to assemble a custom GraphStage like the "chunking" example illustrated in the Akka Streams cookbook:
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, Inlet, Outlet, FlowShape}
import akka.util.ByteString

class Chunking(val chunkSize: Int) extends GraphStage[FlowShape[ByteString, ByteString]] {
  val in = Inlet[ByteString]("Chunking.in")
  val out = Outlet[ByteString]("Chunking.out")
  override val shape = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
    private var buffer = ByteString.empty

    setHandler(in, new InHandler {
      override def onPush(): Unit = {
        val elem = grab(in)
        buffer ++= elem
        emitChunk()
      }

      override def onUpstreamFinish(): Unit = {
        if (buffer.isEmpty)
          completeStage()
        else {
          if (isAvailable(out)) emitChunk()
        }
      }
    })

    setHandler(out, new OutHandler {
      override def onPull(): Unit = {
        if (isClosed(in)) emitChunk()
        else pull(in)
      }
    })

    private def emitChunk(): Unit = {
      if (buffer.isEmpty) {
        if (isClosed(in)) completeStage() else pull(in)
      } else {
        val (chunk, nextBuffer) = buffer.splitAt(chunkSize)
        buffer = nextBuffer
        push(out, chunk)
      }
    }
  }
}
Note that emitChunk() handles the fixed-size chunking and onUpstreamFinish() is necessary for processing the last chunk of elements in the internal buffer.
Test-running with a sample text file "/path/to/file" with the content below:
Millions of people worldwide are in for a disastrous future of hunger, drought and disease, according to a draft report from the United Nations' Intergovernmental Panel on Climate Change, which was leaked to the media this week.
import akka.actor.ActorSystem
import akka.stream.scaladsl._
import java.nio.file.Paths
implicit val system = ActorSystem("system")
implicit val executionContext = system.dispatcher
val chunkSize = 32
FileIO.fromPath(Paths.get("/path/to/file"))
  .via(new Chunking(chunkSize))
  .map(_.utf8String)
  .runWith(Sink.seq)
// res1: scala.concurrent.Future[Seq[String]] = Future(Success(Vector(
// "Millions of people worldwide are",
// " in for a disastrous future of h",
// "unger, drought and disease, acco",
// "rding to a draft report from the",
// " United Nations' Intergovernment",
// "al Panel on Climate Change, whic",
// "h was leaked to the media this w",
// "eek."
// )))
Something like this (in Scala; disclaimer: only mentally compiled), using statefulMapConcat, which allows:
- emitting zero or more frames per input element
- maintaining state from element to element of what's yet to be emitted
val frameSize: Int = ???
require(frameSize > 0, "frame size must be positive")

Flow[ByteString].statefulMapConcat { () =>
  var carry: ByteString = ByteString.empty

  { in =>
    val len = carry.length + in.length
    if (len < frameSize) {
      // append to carry and emit nothing
      carry = carry ++ in
      Nil
    } else if (len == frameSize) {
      if (carry.nonEmpty) {
        // build the frame before resetting the carry
        val frame = carry ++ in
        carry = ByteString.empty
        List(frame)
      } else List(in)
    } else {
      if (carry.isEmpty) {
        val frames = len / frameSize
        val (emit, suffix) = in.splitAt(frames * frameSize)
        carry = suffix
        emit.grouped(frameSize).toList
      } else {
        val (appendToCarry, inn) = in.splitAt(frameSize - carry.length)
        val first = carry ++ appendToCarry
        val frames = inn.length / frameSize
        if (frames > 0) {
          val (emit, suffix) = inn.splitAt(frames * frameSize)
          carry = suffix
          (Iterator.single(first) ++ emit.grouped(frameSize)).toList
        } else {
          carry = inn
          List(first)
        }
      }
    }
  }
}
If in Java, note that carry ++ in can be expressed as carry.concat(in). To get around the Java restriction on closing over non-final variables, it may be useful to hold the carry in a one-element ByteString[] (e.g. ByteString[] carry = { ByteString.emptyByteString() }).
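For reference, a mentally-compiled sketch of the same approach with the Java DSL (the frame size and names are illustrative; like the Scala version above, any trailing partial frame left in the carry at stream end still needs separate handling):
import java.util.ArrayList;
import java.util.List;
import akka.NotUsed;
import akka.stream.javadsl.Flow;
import akka.util.ByteString;
import scala.Tuple2;

final int frameSize = 32; // illustrative

Flow<ByteString, ByteString, NotUsed> framing =
    Flow.of(ByteString.class)
        .statefulMapConcat(() -> {
            // One-element array so the closure can mutate the carried-over bytes.
            final ByteString[] carry = { ByteString.emptyByteString() };
            return in -> {
                ByteString buf = carry[0].concat(in);
                List<ByteString> frames = new ArrayList<>();
                while (buf.length() >= frameSize) {
                    Tuple2<ByteString, ByteString> split = buf.splitAt(frameSize);
                    frames.add(split._1());
                    buf = split._2();
                }
                carry[0] = buf; // keep the incomplete tail for the next element
                return frames;
            };
        });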
I am using TensorFlow to recognize objects in a provided picture. Following this tutorial and using this repo, I succeeded in making my program return the object found inside a picture.
For example, this is the picture I used as input:
and here's the output of my program:
All I want is to get the color of the recognized item (a red jersey in the last case). Is that possible?
Here's the code (from the last link, with just small changes):
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package com.test.sec.compoment;

import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import org.tensorflow.DataType;
import org.tensorflow.Graph;
import org.tensorflow.Output;
import org.tensorflow.Session;
import org.tensorflow.Tensor;
import org.tensorflow.TensorFlow;
import org.tensorflow.types.UInt8;

/** Sample use of the TensorFlow Java API to label images using a pre-trained model. */
public class ImageRecognition {

    private static void printUsage(PrintStream s) {
        final String url =
            "https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip";
        s.println(
            "Java program that uses a pre-trained Inception model (http://arxiv.org/abs/1512.00567)");
        s.println("to label JPEG images.");
        s.println("TensorFlow version: " + TensorFlow.version());
        s.println();
        s.println("Usage: label_image <model dir> <image file>");
        s.println();
        s.println("Where:");
        s.println("<model dir> is a directory containing the unzipped contents of the inception model");
        s.println("            (from " + url + ")");
        s.println("<image file> is the path to a JPEG image file");
    }

    public void index() {
        String modelDir = "C:/Users/Admin/Downloads/inception5h";
        String imageFile = "C:/Users/Admin/Desktop/red-tshirt.jpg";

        byte[] graphDef = readAllBytesOrExit(Paths.get(modelDir, "tensorflow_inception_graph.pb"));
        List<String> labels =
            readAllLinesOrExit(Paths.get(modelDir, "imagenet_comp_graph_label_strings.txt"));
        byte[] imageBytes = readAllBytesOrExit(Paths.get(imageFile));

        try (Tensor<Float> image = constructAndExecuteGraphToNormalizeImage(imageBytes)) {
            float[] labelProbabilities = executeInceptionGraph(graphDef, image);
            int bestLabelIdx = maxIndex(labelProbabilities);
            System.out.println(
                String.format("BEST MATCH: %s (%.2f%% likely)",
                    labels.get(bestLabelIdx),
                    labelProbabilities[bestLabelIdx] * 100f));
        }
    }

    private static Tensor<Float> constructAndExecuteGraphToNormalizeImage(byte[] imageBytes) {
        try (Graph g = new Graph()) {
            GraphBuilder b = new GraphBuilder(g);
            // Some constants specific to the pre-trained model at:
            // https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip
            //
            // - The model was trained with images scaled to 224x224 pixels.
            // - The colors, represented as R, G, B in 1-byte each were converted to
            //   float using (value - Mean)/Scale.
            final int H = 224;
            final int W = 224;
            final float mean = 117f;
            final float scale = 1f;

            // Since the graph is being constructed once per execution here, we can use a constant for the
            // input image. If the graph were to be re-used for multiple input images, a placeholder would
            // have been more appropriate.
            final Output<String> input = b.constant("input", imageBytes);
            final Output<Float> output =
                b.div(
                    b.sub(
                        b.resizeBilinear(
                            b.expandDims(
                                b.cast(b.decodeJpeg(input, 3), Float.class),
                                b.constant("make_batch", 0)),
                            b.constant("size", new int[] {H, W})),
                        b.constant("mean", mean)),
                    b.constant("scale", scale));
            try (Session s = new Session(g)) {
                return s.runner().fetch(output.op().name()).run().get(0).expect(Float.class);
            }
        }
    }

    private static float[] executeInceptionGraph(byte[] graphDef, Tensor<Float> image) {
        try (Graph g = new Graph()) {
            g.importGraphDef(graphDef);
            try (Session s = new Session(g);
                 Tensor<Float> result =
                     s.runner().feed("input", image).fetch("output").run().get(0).expect(Float.class)) {
                final long[] rshape = result.shape();
                if (result.numDimensions() != 2 || rshape[0] != 1) {
                    throw new RuntimeException(
                        String.format(
                            "Expected model to produce a [1 N] shaped tensor where N is the number of labels, instead it produced one with shape %s",
                            Arrays.toString(rshape)));
                }
                int nlabels = (int) rshape[1];
                return result.copyTo(new float[1][nlabels])[0];
            }
        }
    }

    private static int maxIndex(float[] probabilities) {
        int best = 0;
        for (int i = 1; i < probabilities.length; ++i) {
            if (probabilities[i] > probabilities[best]) {
                best = i;
            }
        }
        return best;
    }

    private static byte[] readAllBytesOrExit(Path path) {
        try {
            return Files.readAllBytes(path);
        } catch (IOException e) {
            System.err.println("Failed to read [" + path + "]: " + e.getMessage());
            System.exit(1);
        }
        return null;
    }

    private static List<String> readAllLinesOrExit(Path path) {
        try {
            return Files.readAllLines(path, Charset.forName("UTF-8"));
        } catch (IOException e) {
            System.err.println("Failed to read [" + path + "]: " + e.getMessage());
            System.exit(0);
        }
        return null;
    }

    // In the fullness of time, equivalents of the methods of this class should be auto-generated from
    // the OpDefs linked into libtensorflow_jni.so. That would match what is done in other languages
    // like Python, C++ and Go.
    static class GraphBuilder {
        GraphBuilder(Graph g) {
            this.g = g;
        }

        Output<Float> div(Output<Float> x, Output<Float> y) {
            return binaryOp("Div", x, y);
        }

        <T> Output<T> sub(Output<T> x, Output<T> y) {
            return binaryOp("Sub", x, y);
        }

        <T> Output<Float> resizeBilinear(Output<T> images, Output<Integer> size) {
            return binaryOp3("ResizeBilinear", images, size);
        }

        <T> Output<T> expandDims(Output<T> input, Output<Integer> dim) {
            return binaryOp3("ExpandDims", input, dim);
        }

        <T, U> Output<U> cast(Output<T> value, Class<U> type) {
            DataType dtype = DataType.fromClass(type);
            return g.opBuilder("Cast", "Cast")
                .addInput(value)
                .setAttr("DstT", dtype)
                .build()
                .<U>output(0);
        }

        Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
            return g.opBuilder("DecodeJpeg", "DecodeJpeg")
                .addInput(contents)
                .setAttr("channels", channels)
                .build()
                .<UInt8>output(0);
        }

        <T> Output<T> constant(String name, Object value, Class<T> type) {
            try (Tensor<T> t = Tensor.<T>create(value, type)) {
                return g.opBuilder("Const", name)
                    .setAttr("dtype", DataType.fromClass(type))
                    .setAttr("value", t)
                    .build()
                    .<T>output(0);
            }
        }

        Output<String> constant(String name, byte[] value) {
            return this.constant(name, value, String.class);
        }

        Output<Integer> constant(String name, int value) {
            return this.constant(name, value, Integer.class);
        }

        Output<Integer> constant(String name, int[] value) {
            return this.constant(name, value, Integer.class);
        }

        Output<Float> constant(String name, float value) {
            return this.constant(name, value, Float.class);
        }

        private <T> Output<T> binaryOp(String type, Output<T> in1, Output<T> in2) {
            return g.opBuilder(type, type).addInput(in1).addInput(in2).build().<T>output(0);
        }

        private <T, U, V> Output<T> binaryOp3(String type, Output<U> in1, Output<V> in2) {
            return g.opBuilder(type, type).addInput(in1).addInput(in2).build().<T>output(0);
        }

        private Graph g;
    }
}
You are using code which predicts the label of the given image, i.e. it classifies the image into one of a set of trained classes, so you don't know the exact pixels of your object.
I suggest you do either of the following:
Use an object detector to detect the location of the object and get the bounding box, then take the color occurring most often among the pixels inside the box (see the sketch after this list).
Use a pixel-wise classification (segmentation) like this to get the exact pixels of your object.
Note: you may need to manually train the network (or model) for your object.
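A minimal sketch of the "most common color" step from the first option, assuming you already have a bounding box (x, y, w, h) from a detector; the dominantColor helper and the 32-level channel quantization are illustrative choices, not part of any detection API:
import java.awt.image.BufferedImage;
import java.util.HashMap;
import java.util.Map;

// Hypothetical helper: most frequent (coarsely quantized) color inside a box.
static int dominantColor(BufferedImage img, int x, int y, int w, int h) {
    Map<Integer, Integer> counts = new HashMap<>();
    for (int i = x; i < x + w; i++) {
        for (int j = y; j < y + h; j++) {
            int rgb = img.getRGB(i, j);
            // Quantize each 8-bit channel to 32 levels so near-identical
            // shades fall into the same bucket.
            int key = (((rgb >> 16) & 0xFF) / 8 << 16)
                    | (((rgb >> 8) & 0xFF) / 8 << 8)
                    | ((rgb & 0xFF) / 8);
            counts.merge(key, 1, Integer::sum);
        }
    }
    int bestKey = 0, bestCount = -1;
    for (Map.Entry<Integer, Integer> e : counts.entrySet()) {
        if (e.getValue() > bestCount) {
            bestCount = e.getValue();
            bestKey = e.getKey();
        }
    }
    // Expand the winning bucket back to an approximate 0-255 RGB value.
    return ((bestKey >> 16) * 8 << 16) | (((bestKey >> 8) & 0xFF) * 8 << 8) | ((bestKey & 0xFF) * 8);
}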
Edit:
For Java object detection examples, have a look at this project, which is coded for Android, but it should be straightforward to use it in desktop applications. More specifically, look into this part.
You don't need both object detection and segmentation at the same time, but if you want both, I suggest first training a model for segmentation using Python (the link is provided above), then using the model in Java the same way as the object detection models.
Edit 2:
I have added a simple object detection client in Java which uses TensorFlow Object Detection API models, just to show you that you can use any frozen model in Java.
Also, check this beautiful repository, which uses pixel-wise segmentation.
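For a rough idea of what such a client looks like, here is a hedged sketch of running a frozen Object Detection API graph with the same Java API used above; the tensor names ("image_tensor", "detection_boxes", "detection_scores") follow the Object Detection API's conventions, and producing the uint8 imageTensor (shape [1, height, width, 3]) is left to the caller:
import java.util.List;
import org.tensorflow.Graph;
import org.tensorflow.Session;
import org.tensorflow.Tensor;
import org.tensorflow.types.UInt8;

// Feed a uint8 image tensor into a frozen detection graph and return the
// normalized [ymin, xmin, ymax, xmax] boxes.
static float[][] detect(byte[] frozenGraphDef, Tensor<UInt8> imageTensor) {
    try (Graph g = new Graph()) {
        g.importGraphDef(frozenGraphDef);
        try (Session s = new Session(g)) {
            List<Tensor<?>> outputs = s.runner()
                .feed("image_tensor", imageTensor)
                .fetch("detection_boxes")
                .fetch("detection_scores")
                .run();
            try (Tensor<Float> boxesT = outputs.get(0).expect(Float.class);
                 Tensor<Float> scoresT = outputs.get(1).expect(Float.class)) {
                int n = (int) boxesT.shape()[1];
                float[][] boxes = boxesT.copyTo(new float[1][n][4])[0];
                float[] scores = scoresT.copyTo(new float[1][n])[0];
                // A caller would typically keep only boxes whose score exceeds
                // some threshold (e.g. 0.5) before reading pixel colors.
                return boxes;
            }
        }
    }
}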
Use the code snippet below, which gives the RGB color code. However, as the image might contain pixels of different colors, it's up to you to decide on a spot (e.g. the center) and get the RGB code at its horizontal (X) and vertical (Y) coordinates.
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import javax.imageio.ImageIO;

// create image object from byte array
BufferedImage imageobj = null;
Color[][] imgcolor = null;
try {
    imageobj = ImageIO.read(new ByteArrayInputStream(imageBytes));
} catch (IOException e) {
    e.printStackTrace();
}
if (imageobj != null) {
    imgcolor = new Color[imageobj.getWidth()][imageobj.getHeight()];
    for (int i = 0; i < imageobj.getWidth(); i++) {
        for (int j = 0; j < imageobj.getHeight(); j++) {
            imgcolor[i][j] = new Color(imageobj.getRGB(i, j));
        }
    }
}
if (imgcolor != null && imgcolor.length > 0) {
    System.out.println("Object Color " + imgcolor[imageobj.getWidth() / 2][imageobj.getHeight() / 2].toString());
}
You first have to remove the background pixels to keep only your object, then build a list of all remaining pixels and compute their average color.
Concerning the color detection methodology, you may take a look at Color Image Processing: Emerging Applications, Color Detection, and most especially How we handle color detection.
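As a sketch of that averaging step, assuming you already have a foreground mask from whatever background-removal method you use (the isBackground mask is a placeholder here, not something the linked material provides ready-made):
import java.awt.image.BufferedImage;

// Average the color of pixels that survive the background test.
static int averageForegroundColor(BufferedImage img, boolean[][] isBackground) {
    long r = 0, g = 0, b = 0, n = 0;
    for (int x = 0; x < img.getWidth(); x++) {
        for (int y = 0; y < img.getHeight(); y++) {
            if (isBackground[x][y]) continue; // skip background pixels
            int rgb = img.getRGB(x, y);
            r += (rgb >> 16) & 0xFF;
            g += (rgb >> 8) & 0xFF;
            b += rgb & 0xFF;
            n++;
        }
    }
    if (n == 0) return 0; // nothing classified as foreground
    return (int) ((r / n) << 16 | (g / n) << 8 | (b / n));
}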
I'm making an iOS app that parses JSON data from a Google spreadsheet. One of the issues with Google's JSON data is that it includes unnecessary wrapper text that has to be removed. I'm new to iOS programming.
/*O_o*/google.visualization.Query.setResponse({"version":"0.6","reqId":"0","status":"ok","sig":"1400846503","table":{JSON DATA I NEED}});
I have done this in Java on Android using this code:
int start = result.indexOf("{", result.indexOf("{") + 1);
int end = result.lastIndexOf("}");
String jsonResponse = result.substring(start, end);
My Swift code:
var something = "My google JSON Data"
let Start = String(something).characters.indexOf("{")!;
let substring1: String = something.substringFromIndex(Start);
something = substring1;
let End = String(something).characters.indexOf(")")!.distanceTo(something.endIndex);
let index3 = something.endIndex.advancedBy(-End);
let substring4: String = something.substringToIndex(index3)
What I'm asking is: how do I get the index of the second "{"?
You should use NSJSONSerialization, but if you want to do it your way:
extension String {
    func indexOf(target: String) -> Int {
        if let range = self.rangeOfString(target) {
            return self.startIndex.distanceTo(range.startIndex)
        } else {
            return -1
        }
    }

    func indexOf(target: String, startIndex: Int) -> Int {
        let startRange = self.startIndex.advancedBy(startIndex)
        if let range = self.rangeOfString(target, options: .LiteralSearch, range: startRange..<self.endIndex) {
            return self.startIndex.distanceTo(range.startIndex)
        } else {
            return -1
        }
    }
}
let end = myString.indexOf("{", startIndex: myString.indexOf("{") + 1)