I have a system that handles HTTP POST requests and runs on Spring 5 (standalone Tomcat). In short, it looks like this:
client (Apache AB) ----> micro service (java or golang) --> RabbitMQ --> Core(spring + tomcat).
The thing is, when I use my Java (Spring) service, everything is fine. AB shows this output:
ab -n 1000 -k -s 2 -c 10 -s 60 -p test2.sh -A 113:113 -T 'application/json' https://127.0.0.1:8449/SecureChat/chat/v1/rest-message/send
This is ApacheBench, Version 2.3 <$Revision: 1807734 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
...
Completed 1000 requests
Finished 1000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 8449
SSL/TLS Protocol: TLSv1.2,ECDHE-RSA-AES256-GCM-SHA384,2048,256
Document Path: /rest-message/send
Document Length: 39 bytes
Concurrency Level: 10
Time taken for tests: 434.853 seconds
Complete requests: 1000
Failed requests: 0
Keep-Alive requests: 0
Total transferred: 498000 bytes
Total body sent: 393000
HTML transferred: 39000 bytes
Requests per second: 2.30 [#/sec] (mean)
Time per request: 4348.528 [ms] (mean)
Time per request: 434.853 [ms] (mean, across all concurrent requests)
Transfer rate: 1.12 [Kbytes/sec] received
0.88 kb/s sent
2.00 kb/s total
Connection Times (ms)
min mean[+/-sd] median max
Connect: 4 14 7.6 17 53
Processing: 1110 4317 437.2 4285 8383
Waiting: 1107 4314 437.2 4282 8377
Total: 1126 4332 436.8 4300 8403
That is through TLS.
But when I try to use my Golang service, I get a timeout:
Benchmarking 127.0.0.1 (be patient)...apr_pollset_poll: The timeout specified has expired (70007)
Total of 92 requests completed
And this output:
ab -n 100 -k -s 2 -c 10 -s 60 -p test2.sh -T 'application/json' http://127.0.0.1:8089/
This is ApacheBench, Version 2.3 <$Revision: 1807734 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)...^C
Server Software:
Server Hostname: 127.0.0.1
Server Port: 8089
Document Path: /
Document Length: 39 bytes
Concurrency Level: 10
Time taken for tests: 145.734 seconds
Complete requests: 92
Failed requests: 1
(Connect: 0, Receive: 0, Length: 1, Exceptions: 0)
Keep-Alive requests: 91
Total transferred: 16380 bytes
Total body sent: 32200
HTML transferred: 3549 bytes
Requests per second: 0.63 [#/sec] (mean)
Time per request: 15840.663 [ms] (mean)
Time per request: 1584.066 [ms] (mean, across all concurrent requests)
Transfer rate: 0.11 [Kbytes/sec] received
0.22 kb/s sent
0.33 kb/s total
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.0 0 0
Processing: 1229 1494 1955.9 1262 20000
Waiting: 1229 1291 143.8 1262 2212
Total: 1229 1494 1955.9 1262 20000
That is through plain TCP.
I guess I have some mistakes in my code. I made it all in one file:
func initAmqp(rabbitUrl string) {
    var err error
    conn, err = amqp.Dial(rabbitUrl)
    failOnError(err, "Failed to connect to RabbitMQ")
}

func main() {
    err := gcfg.ReadFileInto(&cfg, "config.gcfg")
    if err != nil {
        log.Fatal(err)
    }
    PrintConfig(cfg)
    if cfg.Section_rabbit.RabbitUrl != "" {
        initAmqp(cfg.Section_rabbit.RabbitUrl)
    }
    mux := http.NewServeMux()
    mux.Handle("/", NewLimitHandler(1000, newTestHandler()))
    server := http.Server{
        Addr:         cfg.Section_basic.Port,
        Handler:      mux,
        ReadTimeout:  20 * time.Second,
        WriteTimeout: 20 * time.Second,
    }
    defer conn.Close()
    log.Println(server.ListenAndServe())
}
func NewLimitHandler(maxConns int, handler http.Handler) http.Handler {
    h := &limitHandler{
        connc:   make(chan struct{}, maxConns),
        handler: handler,
    }
    for i := 0; i < maxConns; i++ {
        h.connc <- struct{}{}
    }
    return h
}
func newTestHandler() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        handler(w, r)
    })
}
func handler(w http.ResponseWriter, r *http.Request) {
    if b, err := ioutil.ReadAll(r.Body); err == nil {
        fmt.Println("message is ", string(b))
        res := publishMessages(string(b))
        // WriteHeader must come before Write; calling it after Write has
        // no effect, because Write implicitly sends a 200 status first.
        w.WriteHeader(http.StatusOK)
        w.Write([]byte(res))
        counter++
    } else {
        w.WriteHeader(http.StatusInternalServerError)
        w.Write([]byte("500 - Something bad happened!"))
    }
}
func publishMessages(payload string) string {
    // Opens a new channel, reply queue, and consumer for every request,
    // publishes the payload, and waits for the correlated reply (RPC pattern).
    ch, err := conn.Channel()
    failOnError(err, "Failed to open a channel")
    defer ch.Close()
    q, err = ch.QueueDeclare(
        "",    // name
        false, // durable
        false, // delete when unused
        true,  // exclusive
        false, // noWait
        nil,   // arguments
    )
    failOnError(err, "Failed to declare a queue")
    msgs, err := ch.Consume(
        q.Name, // queue
        "",     // consumer
        true,   // auto-ack
        false,  // exclusive
        false,  // no-local
        false,  // no-wait
        nil,    // args
    )
    failOnError(err, "Failed to register a consumer")
    corrId := randomString(32)
    log.Println("corrId ", corrId)
    err = ch.Publish(
        "",                            // exchange
        cfg.Section_rabbit.RabbitQeue, // routing key
        false,                         // mandatory
        false,                         // immediate
        amqp.Publishing{
            DeliveryMode:  amqp.Transient,
            ContentType:   "application/json",
            CorrelationId: corrId,
            Body:          []byte(payload),
            Timestamp:     time.Now(),
            ReplyTo:       q.Name,
        })
    failOnError(err, "Failed to Publish on RabbitMQ")
    result := ""
    for d := range msgs {
        if corrId == d.CorrelationId {
            log.Println("result = ", string(d.Body))
            return string(d.Body)
        }
        log.Println("waiting for result = ")
    }
    return result
}
Can someone help?
EDIT
Here are my variables:
type limitHandler struct {
    connc   chan struct{}
    handler http.Handler
}

var conn *amqp.Connection
var q amqp.Queue
var counter int // assumed declaration; counter is incremented in handler()
EDIT 2
func (h *limitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    select {
    case <-h.connc:
        fmt.Println("ServeHTTP")
        h.handler.ServeHTTP(w, req)
        h.connc <- struct{}{}
    default:
        http.Error(w, "503 too busy", http.StatusServiceUnavailable)
    }
}
EDIT 3
func failOnError(err error, msg string) {
    if err != nil {
        // log.Fatalf already exits the process, so a following panic
        // would be unreachable.
        log.Fatalf("%s: %s", msg, err)
    }
}
I need to create a UDF to be used in pyspark (Python) that uses a Java object for its internal calculations.
If it were simple Python, I would do something like:
def f(x):
    return 7

fudf = pyspark.sql.functions.udf(f, pyspark.sql.types.IntegerType())
and call it using:
df = sqlContext.range(0,5)
df2 = df.withColumn("a",fudf(df.id)).show()
However, the implementation of the function I need is in Java, not Python. I need to wrap it somehow so I can call it in a similar way from Python.
My first try was to implement the Java object, then wrap it in Python in pyspark and convert that to a UDF. That failed with a serialization error.
Java code:
package com.test1.test2;
public class TestClass1 {
    Integer internalVal;

    public TestClass1(Integer val1) {
        internalVal = val1;
    }

    public Integer do_something(Integer val) {
        return internalVal;
    }
}
pyspark code:
from py4j.java_gateway import java_import
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
java_import(sc._gateway.jvm, "com.test1.test2.TestClass1")
a = sc._gateway.jvm.com.test1.test2.TestClass1(7)
audf = udf(a,IntegerType())
error:
---------------------------------------------------------------------------
Py4JError Traceback (most recent call last)
<ipython-input-2-9756772ab14f> in <module>()
4 java_import(sc._gateway.jvm, "com.test1.test2.TestClass1")
5 a = sc._gateway.jvm.com.test1.test2.TestClass1(7)
----> 6 audf = udf(a,IntegerType())
/usr/local/spark/python/pyspark/sql/functions.py in udf(f, returnType)
1595 [Row(slen=5), Row(slen=3)]
1596 """
-> 1597 return UserDefinedFunction(f, returnType)
1598
1599 blacklist = ['map', 'since', 'ignore_unicode_prefix']
/usr/local/spark/python/pyspark/sql/functions.py in __init__(self, func, returnType, name)
1556 self.returnType = returnType
1557 self._broadcast = None
-> 1558 self._judf = self._create_judf(name)
1559
1560 def _create_judf(self, name):
/usr/local/spark/python/pyspark/sql/functions.py in _create_judf(self, name)
1565 command = (func, None, ser, ser)
1566 sc = SparkContext.getOrCreate()
-> 1567 pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command, self)
1568 ctx = SQLContext.getOrCreate(sc)
1569 jdt = ctx._ssql_ctx.parseDataType(self.returnType.json())
/usr/local/spark/python/pyspark/rdd.py in _prepare_for_python_RDD(sc, command, obj)
2297 # the serialized command will be compressed by broadcast
2298 ser = CloudPickleSerializer()
-> 2299 pickled_command = ser.dumps(command)
2300 if len(pickled_command) > (1 << 20): # 1M
2301 # The broadcast will have same life cycle as created PythonRDD
/usr/local/spark/python/pyspark/serializers.py in dumps(self, obj)
426
427 def dumps(self, obj):
--> 428 return cloudpickle.dumps(obj, 2)
429
430
/usr/local/spark/python/pyspark/cloudpickle.py in dumps(obj, protocol)
644
645 cp = CloudPickler(file,protocol)
--> 646 cp.dump(obj)
647
648 return file.getvalue()
/usr/local/spark/python/pyspark/cloudpickle.py in dump(self, obj)
105 self.inject_addons()
106 try:
--> 107 return Pickler.dump(self, obj)
108 except RuntimeError as e:
109 if 'recursion' in e.args[0]:
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in dump(self, obj)
222 if self.proto >= 2:
223 self.write(PROTO + chr(self.proto))
--> 224 self.save(obj)
225 self.write(STOP)
226
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save(self, obj)
284 f = self.dispatch.get(t)
285 if f:
--> 286 f(self, obj) # Call unbound method with explicit self
287 return
288
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save_tuple(self, obj)
566 write(MARK)
567 for element in obj:
--> 568 save(element)
569
570 if id(obj) in memo:
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save(self, obj)
284 f = self.dispatch.get(t)
285 if f:
--> 286 f(self, obj) # Call unbound method with explicit self
287 return
288
/usr/local/spark/python/pyspark/cloudpickle.py in save_function(self, obj, name)
191 if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
192 #print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
--> 193 self.save_function_tuple(obj)
194 return
195 else:
/usr/local/spark/python/pyspark/cloudpickle.py in save_function_tuple(self, func)
234 # create a skeleton function object and memoize it
235 save(_make_skel_func)
--> 236 save((code, closure, base_globals))
237 write(pickle.REDUCE)
238 self.memoize(func)
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save(self, obj)
284 f = self.dispatch.get(t)
285 if f:
--> 286 f(self, obj) # Call unbound method with explicit self
287 return
288
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save_tuple(self, obj)
552 if n <= 3 and proto >= 2:
553 for element in obj:
--> 554 save(element)
555 # Subtle. Same as in the big comment below.
556 if id(obj) in memo:
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save(self, obj)
284 f = self.dispatch.get(t)
285 if f:
--> 286 f(self, obj) # Call unbound method with explicit self
287 return
288
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save_list(self, obj)
604
605 self.memoize(obj)
--> 606 self._batch_appends(iter(obj))
607
608 dispatch[ListType] = save_list
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in _batch_appends(self, items)
637 write(MARK)
638 for x in tmp:
--> 639 save(x)
640 write(APPENDS)
641 elif n:
/home/mendea3/anaconda2/lib/python2.7/pickle.pyc in save(self, obj)
304 reduce = getattr(obj, "__reduce_ex__", None)
305 if reduce:
--> 306 rv = reduce(self.proto)
307 else:
308 reduce = getattr(obj, "__reduce__", None)
/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
811 answer = self.gateway_client.send_command(command)
812 return_value = get_return_value(
--> 813 answer, self.gateway_client, self.target_id, self.name)
814
815 for temp_arg in temp_args:
/usr/local/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
43 def deco(*a, **kw):
44 try:
---> 45 return f(*a, **kw)
46 except py4j.protocol.Py4JJavaError as e:
47 s = e.java_exception.toString()
/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
310 raise Py4JError(
311 "An error occurred while calling {0}{1}{2}. Trace:\n{3}\n".
--> 312 format(target_id, ".", name, value))
313 else:
314 raise Py4JError(
Py4JError: An error occurred while calling o18.__getnewargs__. Trace:
py4j.Py4JException: Method __getnewargs__([]) does not exist
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:335)
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:344)
at py4j.Gateway.invoke(Gateway.java:252)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:209)
at java.lang.Thread.run(Thread.java:745)
EDIT: I also tried to make the Java class serializable, but to no avail.
My second attempt was to define the UDF in Java to begin with, but that failed because I am not sure how to correctly wrap it:
java code:
package com.test1.test2;
import org.apache.spark.sql.api.java.UDF1;
public class TestClassUdf implements UDF1<Integer, Integer> {
    Integer retval;

    public TestClassUdf(Integer val) {
        retval = val;
    }

    @Override
    public Integer call(Integer arg0) throws Exception {
        return retval;
    }
}
but how would I use it?
I tried:
from py4j.java_gateway import java_import
java_import(sc._gateway.jvm, "com.test1.test2.TestClassUdf")
a = sc._gateway.jvm.com.test1.test2.TestClassUdf(7)
dfint = sqlContext.range(0,15)
df = dfint.withColumn("a",a(dfint.id))
but I get:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-514811090b5f> in <module>()
3 a = sc._gateway.jvm.com.test1.test2.TestClassUdf(7)
4 dfint = sqlContext.range(0,15)
----> 5 df = dfint.withColumn("a",a(dfint.id))
TypeError: 'JavaObject' object is not callable
and I tried to use a.call instead of a:
df = dfint.withColumn("a",a.call(dfint.id))
but got:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
in ()
3 a = sc._gateway.jvm.com.test1.test2.TestClassUdf(7)
4 dfint = sqlContext.range(0,15)
----> 5 df = dfint.withColumn("a",a.call(dfint.id))
/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
796 def __call__(self, *args):
797 if self.converters is not None and len(self.converters) > 0:
--> 798 (new_args, temp_args) = self._get_args(args)
799 else:
800 new_args = args
/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in _get_args(self, args)
783 for converter in self.gateway_client.converters:
784 if converter.can_convert(arg):
--> 785 temp_arg = converter.convert(arg, self.gateway_client)
786 temp_args.append(temp_arg)
787 new_args.append(temp_arg)
/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/java_collections.py in convert(self, object, gateway_client)
510 HashMap = JavaClass("java.util.HashMap", gateway_client)
511 java_map = HashMap()
--> 512 for key in object.keys():
513 java_map[key] = object[key]
514 return java_map
TypeError: 'Column' object is not callable
Any help would be appreciated.
I got this working with the help of another question (and answer) of your own about UDAFs.
Spark provides a udf() method for wrapping Scala FunctionN, so we can wrap the Java function in Scala and use that. Your Java method needs to be static or on a class that implements Serializable.
package com.example
import org.apache.spark.sql.UserDefinedFunction
import org.apache.spark.sql.functions.udf
class MyUdf extends Serializable {
  def getUdf: UserDefinedFunction = udf(() => MyJavaClass.MyJavaMethod())
}
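For completeness, a minimal sketch of the Java side that this Scala wrapper calls; MyJavaClass and MyJavaMethod are the placeholder names from the snippet above, and the String return type is an assumption:
package com.example;

public class MyJavaClass {
    // Static method: no instance state, so nothing needs to be
    // serialized along with the UDF closure.
    public static String MyJavaMethod() {
        return "computed in Java";
    }
}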
Usage in PySpark:
def my_udf():
    from pyspark.sql.column import Column, _to_java_column, _to_seq
    pcls = "com.example.MyUdf"
    jc = sc._jvm.java.lang.Thread.currentThread() \
        .getContextClassLoader().loadClass(pcls).newInstance().getUdf().apply
    return Column(jc(_to_seq(sc, [], _to_java_column)))
rdd1 = sc.parallelize([{'c1': 'a'}, {'c1': 'b'}, {'c1': 'c'}])
df1 = rdd1.toDF()
df2 = df1.withColumn('mycol', my_udf())
As with the UDAF in your other question and answer, we can pass columns into it with return Column(jc(_to_seq(sc, ["col1", "col2"], _to_java_column)))
In line with https://dzone.com/articles/pyspark-java-udf-integration-1, you could define a UDF1 in Java using:
public class AddNumber implements UDF1<Long, Long> {
    @Override
    public Long call(Long num) throws Exception {
        return (num + 5);
    }
}
And then, after adding the jar to your pyspark session with --jars <your-jar>,
you can use it in pyspark as:
from pyspark.sql import functions as F
from pyspark.sql.types import FloatType, LongType

df = spark.createDataFrame([float(i) for i in range(100)], FloatType()).toDF("a")
spark.udf.registerJavaFunction("addNumber", "com.example.spark.AddNumber", LongType())
df.withColumn("b", F.expr("addNumber(a)")).show(5)
+---+---+
| a| b|
+---+---+
|0.0| 5|
|1.0| 6|
|2.0| 7|
|3.0| 8|
|4.0| 9|
+---+---+
only showing top 5 rows
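One caveat if you adapt this to the TestClassUdf from the question: as far as I can tell, registerJavaFunction instantiates the class by name through its no-argument constructor, so the Integer constructor parameter would have to become internal state. A sketch under that assumption:
package com.test1.test2;

import org.apache.spark.sql.api.java.UDF1;

public class TestClassUdf implements UDF1<Integer, Integer> {
    // Fixed value instead of a constructor argument, so the class can be
    // instantiated reflectively via a no-arg constructor.
    private final Integer retval = 7;

    @Override
    public Integer call(Integer arg0) throws Exception {
        return retval;
    }
}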
Hi, I am new to Hadoop MapReduce programming. I have a requirement like the one below:
The larger file, i.e. the input file, input.txt:
101 Vince 12000
102 James 33
103 Tony 32
104 John 25
105 Nataliya 19
106 Anna 20
107 Harold 29
And this is the smaller file, lookupfile.txt:
101 Vince 12000
102 James 10000
103 Tony 20000
104 John 25000
105 Nataliya 15000
Now what we want is to get those records which have a common ID number. To achieve this, we use the smaller file as the lookup file and the larger file as the input file. The complete Java code is given below.
This is the result we get after running the code:
102 James 33 10000
103 Tony 32 20000
104 John 25 25000
105 Nataliya 19 15000
Code:
// Imports required by the code below (Hadoop 1.x-style API, as used in the question):
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class Join extends Configured implements Tool {

    public static class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {

        Path[] cachefiles = new Path[0]; // paths of the lookup files
        List<String> exEmployees = new ArrayList<String>(); // contents of the lookup file

        /******************** setup method ******************************************/
        @Override
        public void setup(Context context) {
            Configuration conf = context.getConfiguration();
            try {
                cachefiles = DistributedCache.getLocalCacheFiles(conf);
                BufferedReader reader = new BufferedReader(new FileReader(cachefiles[0].toString()));
                String line;
                while ((line = reader.readLine()) != null) {
                    exEmployees.add(line); // each lookup-file line is stored in the list
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        /******************** setup method ends *************************************/

        /******************** map method ********************************************/
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] line = value.toString().split("\t");
            for (String e : exEmployees) {
                String[] listLine = e.toString().split("\t");
                if (line[0].equals(listLine[0])) {
                    context.write(new Text(line[0]), new Text(line[1] + "\t" + line[2] + "\t" + listLine[2]));
                }
            }
        }
        /******************** map method ends ***************************************/
    }

    /******************** run method ************************************************/
    public int run(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "aggprog");
        job.setJarByClass(Join.class);
        DistributedCache.addCacheFile(new Path(args[0]).toUri(), job.getConfiguration());
        FileInputFormat.addInputPath(job, new Path(args[1]));
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        job.setMapperClass(JoinMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        return (job.waitForCompletion(true) ? 0 : 1);
    }

    public static void main(String[] args) throws Exception {
        int ecode = ToolRunner.run(new Join(), args);
        System.exit(ecode);
    }
}
Execution command:
hadoop jar '/home/cloudera/Desktop/DistributedCache.jar' Join My_Job/MultiInput_1/Input/input.txt My_Job/MultiInput_1/Input/smallerinput.txt My_Job/MultiInput_1/My_Output
I tried the above command, but it was not working, and I could not tell what the problem was or where it was. I was unable to execute the code.
Finally I tried the command below, and it worked:
hadoop jar '/home/cloudera/Desktop/DistributedCache.jar' Join hdfs/Input/smallerfile.txt hdfs/Input/input.txt My_Job/MultiInput_1/MyOutput
I found my mistake: I was passing the large file where the small (lookup) file should go. When I swapped them it ran, but the output was not as expected.
Expected output is:
101 Vince 12000
102 James 33 10000
103 Tony 32 20000
104 John 25 25000
105 Nataliya 19 15000
106 Anna 20
107 Harold 29
But my output is:
102 James 33 10000
103 Tony 32 20000
104 John 25 25000
105 Nataliya 19 15000
Can somebody help me?
Yes, user3475485, your files should be put in HDFS for this code to run. Alternatively, because you are using GenericOptionsParser (via ToolRunner), this format:
hadoop jar jarname.jar drivername -files file1,file2
should work for you.
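For example, a sketch using the file names from the question (this assumes run() is adjusted to drop the DistributedCache.addCacheFile call, since -files already distributes the file, and that the remaining arguments shift accordingly):
hadoop jar DistributedCache.jar Join -files lookupfile.txt My_Job/MultiInput_1/Input/input.txt My_Job/MultiInput_1/My_Output
Files passed with -files are localized and symlinked into each task's working directory, so setup() could open the lookup file by its plain name:
BufferedReader reader = new BufferedReader(new FileReader("lookupfile.txt"));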
I'm trying to use JNI to import a C++ library that I will use in my Java project. I have successfully gotten the library method to be called, and I am now trying to return an ArrayList of Point objects from the JNI function. However, I'm getting an error with a rather vague description ('Problematic frame:
V [libjvm.dylib+0x30bab5] JNI_ArgumentPusherVaArg::JNI_ArgumentPusherVaArg(_jmethodID*, __va_list_tag*)+0xd
'). I have no clue what's going on. Does anyone know what is happening, or how I should interpret this console output? I can attach the log file if anyone wants it.
Console output:
#
# A fatal error has been detected by the Java Runtime Environment:
#
# SIGSEGV (0xb) at pc=0x000000010550bab5, pid=4633, tid=6403
#
# JRE version: Java(TM) SE Runtime Environment (7.0_51-b13) (build 1.7.0_51-b13)
# Java VM: Java HotSpot(TM) 64-Bit Server VM (24.51-b03 mixed mode bsd-amd64 compressed oops)
# Problematic frame:
# V [libjvm.dylib+0x30bab5] JNI_ArgumentPusherVaArg::JNI_ArgumentPusherVaArg(_jmethodID*, __va_list_tag*)+0xd
#
# Failed to write core dump. Core dumps have been disabled. To enable core dumping, try "ulimit -c unlimited" before starting Java again
#
# An error report file with more information is saved as:
# /Users/zalbhathena/Documents/workspace/Thesis-Test-Application/HPA* Program/hs_err_pid4633.log
#
# If you would like to submit a bug report, please visit:
# http://bugreport.sun.com/bugreport/crash.jsp
#
Hello World!
ASDFse_dcdt.cpp 284 : Starting init()...
SeDcdt::initfoo.512se_dcdt.cpp 303 : Calculating External Polygon for domain with 4 points...
se_dcdt.cpp 317 : Creating External Polygon...
SeDcdt::initse_dcdt.cpp 326 : Backface v1: -14 -14
se_dcdt.cpp 329 : Backface v2: -14 14
se_dcdt.cpp 332 : Backface v3: 14 14
se_dcdt.cpp 335 : Backface v4: 14 -14
se_triangulator.cpp 417 : insert_point_in_edge...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...4
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 387 : insert_point_in_face...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...3
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 417 : insert_point_in_edge...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...4
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 387 : insert_point_in_face...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...3
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 417 : insert_point_in_edge...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...4
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 387 : insert_point_in_face...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...3
se_triangulator.cpp 538 : Ok.
se_triangulator.cpp 387 : insert_point_in_face...
se_triangulator.cpp 473 : propagate_delaunay...
se_triangulator.cpp 480 : optimizing in constrained mode...3
se_triangulator.cpp 538 : Ok.
1
2
3
4
HPAProgram.cpp
/*
* HPAProgram.c++
*
* Created on: Feb 4, 2014
* Author: zalbhathena
*/
//#include <jni.h>
#include <stdio.h>
#include "HPAProgram.h"
#include "DCDTWrapper.h"
JNIEXPORT jobject JNICALL Java_HPAProgram_sayHello (JNIEnv *env, jobject obj) {
    printf("Hello World!\n");
    GsArray<GsPnt2> edges;
    create_dcdt(&edges);
    printf("1\n");
    jclass arraylist_class = (*env).FindClass("java/util/ArrayList");
    jclass point_class = (*env).FindClass("java/awt/Point");
    jmethodID init_arraylist = (*env).GetMethodID(arraylist_class, "<init>", "()V");
    jmethodID init_point = (*env).GetMethodID(point_class, "<init>", "(II)V");
    jmethodID add_arraylist = (*env).GetMethodID(arraylist_class, "add", "(java/lang/Object)V");
    jobject return_obj = (*env).NewObject(arraylist_class, init_arraylist);
    printf("2\n");
    for (int n = 0; n < edges.size(); n++) {
        jfloat x = 1;
        jfloat y = 1;
        printf("3\n");
        jobject point_obj = (*env).NewObject(point_class, init_point, x, y);
        printf("4\n");
        (*env).CallVoidMethod(return_obj, add_arraylist, point_obj);
        printf("5\n");
    }
    return nullptr;
}
DCDTWrapper.cpp
# define END 12345.6
# define FIRST_EXAMPLE Example1
//# include "DCDTsrc/se_dcdt.h"
//# include "DCDTsrc/gs_polygon.h"
//# include <stdlib.h>
# include "DCDTWrapper.h"
static double Example1[] =
{ -10, -10, 10, -10, 10, 10, -10, 10, END,
1, 1, 7, 3, 3, 8, END,
END };
static const double* CurExample = FIRST_EXAMPLE;
static SeDcdt TheDcdt;
static GsPolygon CurPath;
static GsPolygon CurChannel;
static float CurX1=0, CurY1=0, CurX2=0, CurY2=0;
static int CurSelection=0; // -2,-1: moving point, >0: moving polygon
void create_dcdt (GsArray<GsPnt2>* edges)
{
printf("ASDF");
const double* data = CurExample;
GsPolygon pol;
// domain:
while ( *data!=END ) { pol.push().set((float)data[0],(float)data[1]); data+=2; }
TheDcdt.init ( pol, 0.00001f );
while ( *++data!=END )
{ pol.size(0);
while ( *data!=END ) { pol.push().set((float)data[0],(float)data[1]); data+=2; }
TheDcdt.insert_polygon ( pol );
}
GsArray<GsPnt2> unconstr;
TheDcdt.get_mesh_edges(edges, &unconstr);
for(int i = 0; i < unconstr.size(); i++) {
edges -> push(unconstr.get(i));
}
}
HPAProgram.java
import java.util.ArrayList;

public class HPAProgram {
    /* static {
        //System.out.println(new File(".").getAbsolutePath());
        System.loadLibrary("libhpaprogram"); // hello.dll (Windows) or libhello.so (Unixes)
    }
    //private native void sayHello();
    */
    public native ArrayList<String> sayHello();

    public static void main(String[] args) {
        System.loadLibrary("HPAProgram");
        HPAProgram s = new HPAProgram();
        s.sayHello();
        MapWindowController map = new MapWindowController();
    }
}
The add method of class ArrayList has return type boolean, not void, so in the C++ code you have to call CallBooleanMethod on env instead of CallVoidMethod, e.g. (*env).CallBooleanMethod(return_obj, add_arraylist, point_obj);.
Edit: And you need to correct the signature of the add method as well.
You need to change the method signature for the "add" method of ArrayList as below; object types must use the fully qualified "L...;" form, and Z denotes the boolean return type:
jmethodID add_arraylist = (*env).GetMethodID(arraylist_class, "add", "(Ljava/lang/Object;)Z");
instead of
jmethodID add_arraylist = (*env).GetMethodID(arraylist_class, "add", "(java/lang/Object)V");
Upon calling the following code snippet:
Message message_in = null;
if (inbox instanceof IMAPFolder) {
    IMAPFolder f = (IMAPFolder) inbox;
    f.idle();
}
System.out.println("IDLE done");
message_in = inbox.getMessage(inbox.getMessageCount());
message_in.setFlag(Flags.Flag.DELETED, true);
inbox.expunge();
I receive the error message:
Got 1 new messages
***********************************************************
------------ Message 1 ------------
DONE
A4 OK IDLE completed.
A5 FETCH 13 (ENVELOPE INTERNALDATE RFC822.SIZE)
IDLE done
* 13 FETCH (ENVELOPE ("Wed, 29 Aug 2012 13:25:15 -0500" "Doc Com: Voice msg from Outside caller 5555555555 Unread:2" (("Support" NIL "support" "example.com")) NIL NIL (("Support" NIL "support" "example.com")) NIL NIL "<94BA85B8-FC4D-4193-B386-7FD2C1DE1B1F@example.com>" "<873439BD-8640-47D9-BF88-5FD3521B8173@example.com>") INTERNALDATE "29-Aug-2012 13:25:17 -0500" RFC822.SIZE 967)
A5 OK FETCH completed.
A6 STORE 13 +FLAGS (\Deleted)
* 13 FETCH (FLAGS (\Deleted \Recent))
A6 OK STORE completed.
A7 EXPUNGE
* 13 EXPUNGE
* 12 EXPUNGE
* 11 EXPUNGE
* 10 EXPUNGE
* 9 EXPUNGE
* 8 EXISTS
A7 OK EXPUNGE completed.
javax.mail.MessageRemovedException
at com.sun.mail.imap.IMAPMessage.checkExpunged(IMAPMessage.java:205)
at com.sun.mail.imap.IMAPMessage.loadBODYSTRUCTURE(IMAPMessage.java:1305)
at com.sun.mail.imap.IMAPMessage.getContentType(IMAPMessage.java:450)
at javax.mail.internet.MimeBodyPart.isMimeType(MimeBodyPart.java:1050)
at javax.mail.internet.MimeMessage.isMimeType(MimeMessage.java:986)
at com.example.vmmonitor.main.VMMonitor.getText(VMMonitor.java:211)
at com.example.vmmonitor.main.VMMonitor.access$000(VMMonitor.java:27)
at com.example.vmmonitor.main.VMMonitor$1.messagesAdded(VMMonitor.java:109)
at javax.mail.event.MessageCountEvent.dispatch(MessageCountEvent.java:150)
at javax.mail.EventQueue.run(EventQueue.java:134)
at java.lang.Thread.run(Thread.java:680)
DEBUG IMAP: IMAPProtocol noop
I was able to patch the issue by adding a Thread.sleep() as follows:
Message message_in = null;
if (inbox instanceof IMAPFolder) {
    IMAPFolder f = (IMAPFolder) inbox;
    f.idle();
}
System.out.println("IDLE done");
message_in = inbox.getMessage(inbox.getMessageCount());
message_in.setFlag(Flags.Flag.DELETED, true);
Thread.sleep(2000); /***************** bug patch ***********/
inbox.expunge();
What is the issue? Why am I able to flag a message as DELETED but not able to expunge the mailbox?
This multithreaded program is accessing the inbox resource in a non-thread-safe manner: it is written so that inbox.expunge() and the message-listener callback work on the mailbox concurrently, and hence the exception is thrown.
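Rather than a fixed Thread.sleep(), one option is to serialize access to the folder explicitly. A minimal sketch, assuming the listener thread and this code can be made to acquire the same lock:
// Guard the delete-and-expunge sequence with a lock that the
// message-listener callback also acquires before touching the folder:
synchronized (inbox) {
    message_in.setFlag(Flags.Flag.DELETED, true);
    inbox.expunge();
}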