I have made an event system; however, it is too slow.
The issue is that there are multiple entries in a map that I never actually added. I don't understand how they get there.
public class OrdinalMap<V> {
private final Map<Integer, V> map;
public OrdinalMap(Class<? extends Enum<?>> valueType, V virginValue) {
map = new HashMap<Integer, V>();
Enum<?>[] enums = valueType.getEnumConstants();
for (int i = 0; i < enums.length; i++) {
put(enums[i].ordinal(), virginValue);
}
}
public OrdinalMap(Class<? extends Enum<?>> valueType) {
this(valueType, null);
}
public V put(Integer key, V value) {
return map.put(key, value);
}
public V get(Object o) {
return map.get(o);
}
public Set<Entry<Integer, V>> entrySet() {
return map.entrySet();
}
}
I want to make dispatchEvent faster (fewer iterations). It has too many iterations because of registerListener.
There are event handler methods inside all of the other priorities, when they shouldn't be there. I can't figure out why they are there, but I'm certain the cause is in registerListener. Because they are inside all priorities, I have to use this check:
if (mapping.getKey().getAnnotation(EventHandler.class).priority().ordinal() == entry.getKey()) {
Which makes it even slower.
@Override
public void dispatchEvent(Event event) {
OrdinalMap<Map<Method, EventListener>> priorityMap = getRegistry().get(event.getClass());
if (priorityMap != null) {
CancellableEvent cancellableEvent = null;
boolean cancellable;
if (cancellable = event instanceof CancellableEvent) {
cancellableEvent = (CancellableEvent) event;
if (cancellableEvent.isCancelled()) return;
}
try {
for (Entry<Integer, Map<Method, EventListener>> entry : priorityMap.entrySet()) {
for (Entry<Method, EventListener> mapping : entry.getValue().entrySet()) {
if (mapping.getKey().getAnnotation(EventHandler.class).priority().ordinal() == entry.getKey()) {
mapping.getKey().invoke(mapping.getValue(), event);
if (cancellable && cancellableEvent.isCancelled()) return;
}
}
}
} catch (InvocationTargetException | IllegalAccessException e) {
e.printStackTrace();
}
}
}
@Override
public void registerListener(EventListener listener) {
for (Method method : listener.getClass().getMethods()) {
EventHandler handler = method.getAnnotation(EventHandler.class);
if (handler != null) {
Class<?>[] parameters = method.getParameterTypes();
if (parameters.length == 1) {
@SuppressWarnings("unchecked")
Class<? extends Event> event = (Class<? extends Event>) parameters[0];
EventPriority priority = handler.priority();
OrdinalMap<Map<Method, EventListener>> priorityMap = getRegistry().get(event);
if (priorityMap == null) {
priorityMap = new OrdinalMap<Map<Method, EventListener>>(EventPriority.class, (Map<Method, EventListener>) new HashMap<Method, EventListener>());
}
Map<Method, EventListener> methodMap = priorityMap.get(priority.ordinal());
methodMap.put(method, listener);
priorityMap.put(priority.ordinal(), methodMap);
getRegistry().put(event, priorityMap);
}
}
}
}
You are using maps, so consider using their benefits instead of iterating over all entries.
if (mapping.getKey().getAnnotation(EventHandler.class).priority().ordinal() == entry.getKey()) {
Comparing two HashMap keys to find a match is not really a good idea.
How about the following? I hope I didn't make any thinking mistake:
Set<Integer> priorityMapKeySet = priorityMap.keySet();
for (Map<Method, EventListener> mapping : priorityMap.values()) {
if (priorityMapKeySet.contains(mapping.getKey().getAnnotation(EventHandler.class).priority().ordinal())) {
mapping.getKey().invoke(mapping.getValue(), event);
if (cancellable && cancellableEvent.isCancelled()) return;
}
}
Here you no longer have the outer for loop.
My bad, I didn't pay enough attention ...
But the idea is the same: when using HashMaps / HashSets one should always try to use get / contains instead of iterating, and for that one needs to design the registry in a way that makes this possible.
Would the following work for your needs? (untested)
private final static class Registry {
private final static Map<String, Set<Integer>> prioritySetByEventMap = new HashMap<>();
private final static Map<String, EventListener> eventListenerByEventAndPriorityMap = new HashMap<>();
private final static Map<String, Method> methodByEventAndListenerMap = new HashMap<>();
public static Set<Integer> getPrioritySetByEvent(Class<Event> event) {
return prioritySetByEventMap.get(event.getName());
}
public static synchronized void registerEventByPriority(Class<Event> event, Integer priority) {
Set<Integer> ps = prioritySetByEventMap.get(event.getName());
if(ps == null) {
ps = new HashSet<>();
prioritySetByEventMap.put(event.getName(), ps);
}
ps.add(priority);
}
public static EventListener getEventListenerByEventAndPriority(Class<Event> event, Integer priority) {
String key = event.getName() + "-" + priority;
return eventListenerByEventAndPriorityMap.get(key);
}
public static synchronized void registerEventListenerByEventAndPriority(Class<Event> event, Integer priority, EventListener listener) {
String key = event.getName() + "-" + priority;
eventListenerByEventAndPriorityMap.put(key, listener);
}
public static Method getMethodByEventAndListener(Class<Event> event, EventListener listener) {
String key = listener.toString() + "-" + event.getName();
return methodByEventAndListenerMap.get(key);
}
public static synchronized void registerMethodByEventAndListener(Class<Event> event, EventListener listener, Method method) {
String key = listener.toString() + "-" + event.getName();
methodByEventAndListenerMap.put(key, method);
}
}
and
public void registerListener(EventListener listener) {
for (Method method : listener.getClass().getMethods()) {
EventHandler handler = method.getAnnotation(EventHandler.class);
if (handler != null) {
Class<?>[] parameters = method.getParameterTypes();
if (parameters.length == 1) {
Class<Event> event = (Class<Event>) parameters[0];
EventPriority priority = handler.priority();
Registry.registerEventByPriority(event, priority.ordinal());
Registry.registerEventListenerByEventAndPriority(event, priority.ordinal(), listener);
Registry.registerMethodByEventAndListener(event, listener, method);
}
}
}
}
public void dispatchEvent(Event event) {
Set<Integer> prioritySet = Registry.getPrioritySetByEvent((Class<Event>) event.getClass());
if (prioritySet != null) {
CancellableEvent cancellableEvent = null;
boolean cancellable;
if (cancellable = event instanceof CancellableEvent) {
cancellableEvent = (CancellableEvent) event;
if (cancellableEvent.isCancelled())
return;
}
try {
for(Integer priority : prioritySet) {
EventListener listener = Registry.getEventListenerByEventAndPriority((Class<Event>) event.getClass(), priority);
if(listener != null) {
Method m = Registry.getMethodByEventAndListener((Class<Event>) event.getClass(), listener);
if(m != null) {
m.invoke(listener, event);
if (cancellable && cancellableEvent.isCancelled()) {
return;
}
}
}
}
} catch (InvocationTargetException | IllegalAccessException e) {
e.printStackTrace();
}
}
}
In the server, when receiving a login request from each client, I cache the data in a ConcurrentHashMap<String, String> in the channelRead method. Can I read the values of that ConcurrentHashMap<String, String> from anywhere by defining a public method such as public synchronized static Map<String, String> getClientMap()? I also want to define a scheduled task that clears the data cached in that ConcurrentHashMap<String, String>. How can I do that in Netty? I have tried many times, but failed.
This is my code in ChannelHandler:
public class LoginAuthRespHandler extends ChannelInboundHandlerAdapter {
private static final Logger LOGGER = LoggerFactory.getLogger(LoginAuthRespHandler.class);
private static final Map<String, String> nodeCheck = new ConcurrentHashMap<String, String>();
private String[] whiteList = { "127.0.0.1", "192.168.56.1" };
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
AlarmMessage message = (AlarmMessage) msg;
//judge the message's type
if (message.getHeader() != null && message.getHeader().getType() == MessageType.LOGIN_REQ.value()) {
String nodeIndex = ctx.channel().remoteAddress().toString();
AlarmMessage loginResp = null;
if (nodeCheck.containsKey(nodeIndex)) {
loginResp = buildResponse(ResultType.FAIL);
} else {
InetSocketAddress address = (InetSocketAddress) ctx.channel().remoteAddress();
String ip = address.getAddress().getHostAddress();
boolean isOK = false;
for (String WIP : whiteList) {
if (WIP.equals(ip)) {
isOK = true;
break;
}
}
loginResp = isOK ? buildResponse(ResultType.SUCCESS) : buildResponse(ResultType.FAIL);
if (isOK)
//add a value
nodeCheck.put(nodeIndex, message.getBody().toString());
System.out.println(nodeCheck.get(nodeIndex));
}
ctx.writeAndFlush(loginResp);
} else {
ctx.fireChannelRead(msg);
}
}
private AlarmMessage buildResponse(ResultType result) {
AlarmMessage message = new AlarmMessage();
Header header = new Header();
header.setType(MessageType.LOGIN_RESP.value());
message.setHeader(header);
message.setBody(result.value());
return message;
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
String nodeIndex = ctx.channel().remoteAddress().toString();
ctx.close();
if(nodeCheck.containsKey(nodeIndex)){
nodeCheck.remove(nodeIndex);
}
}
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
cause.printStackTrace();
ctx.close();
ctx.fireExceptionCaught(cause);
}
public static Map<String, String> getNodeCheck() {
return nodeCheck;
}
}
And this is my code in the main thread:
ScheduledFuture<?> sf = executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
HashSet<String> clients = new HashSet<>();
Map<String,String> map = LoginAuthRespHandler.getNodeCheck();
System.out.println(map.size());
for (String key:map.keySet()) {
clients.add(map.get(key));
}
//update data
try{
MySQLDB.updateClientStatus(clients);
}catch (Exception e){
e.printStackTrace();
}
//clear map
map.clear();
clients.clear();
}
},10,10,TimeUnit.SECONDS);
}
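For the scheduling part, one option (a sketch only, not from the original post; the wrapper class and its call site are assumptions) is to let one of Netty's own event loops run the periodic cleanup instead of a separate executor, since every EventLoop is also a ScheduledExecutorService:
import io.netty.channel.EventLoopGroup;
import java.util.Map;
import java.util.concurrent.TimeUnit;
public class CacheCleanupScheduler {
    // 'group' is assumed to be the worker EventLoopGroup already used by the ServerBootstrap.
    public static void schedule(EventLoopGroup group) {
        group.next().scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                Map<String, String> map = LoginAuthRespHandler.getNodeCheck();
                // push the cached values to the database here, as in the original task
                map.clear();
            }
        }, 10, 10, TimeUnit.SECONDS);
    }
}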
I am reading this page about coroutines in Python and this Wikipedia page. I saw that there are a few libraries in Java implementing coroutines.
My question is: is there any known reason why the Java designers decided not to implement coroutines so far, and is there any plan to include them in a future version of Java?
Thanks.
Actually the concept of a co-routine was the first design of the Java threading system. The wait/notify mechanism is a simplistic form of co-routine where notify is equivalent to yield etc.
Since then much has been done, particularly to make structures thread-safe rather than algorithms. This derives from the realization that it is not the code that must synchronize/yield but the data structure used to communicate between the threads that must be thread-safe.
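To make that analogy concrete, here is a minimal sketch (my own illustration, not part of the original answer) of two threads handing control back and forth with wait/notify; each wait() plays the role of a yield and each notify() resumes the other side:
public class WaitNotifyHandoff {
    private final Object lock = new Object();
    private boolean produced = false;
    public void producer() throws InterruptedException {
        synchronized (lock) {
            for (int i = 0; i < 3; i++) {
                while (produced) lock.wait();   // "yield" until the consumer has taken the value
                System.out.println("produced " + i);
                produced = true;
                lock.notify();                  // hand control to the consumer
            }
        }
    }
    public void consumer() throws InterruptedException {
        synchronized (lock) {
            for (int i = 0; i < 3; i++) {
                while (!produced) lock.wait();  // "yield" until the producer has published a value
                System.out.println("consumed " + i);
                produced = false;
                lock.notify();                  // hand control back to the producer
            }
        }
    }
    public static void main(String[] args) {
        WaitNotifyHandoff h = new WaitNotifyHandoff();
        new Thread(() -> { try { h.producer(); } catch (InterruptedException ignored) {} }).start();
        new Thread(() -> { try { h.consumer(); } catch (InterruptedException ignored) {} }).start();
    }
}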
Project Loom
Continuations and coroutines will come to Java in the near future, and they'll be called virtual threads (also referred to as fibers). There's a project called Loom:
Project Loom is intended to explore, incubate and deliver Java VM features and APIs built on top of them for the purpose of supporting easy-to-use, high-throughput lightweight concurrency and new programming models on the Java platform. This is accomplished by the addition of the following constructs:
Virtual threads
Delimited continuations
Tail-call elimination
Further reading: https://cr.openjdk.java.net/~rpressler/loom/Loom-Proposal.html
To quote that document:
It is the goal of this project to add a public delimited continuation (or coroutine) construct to the Java platform. However, this goal is secondary to fibers …
Preliminary builds of Project Loom are available now, based on early-access Java 16.
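For illustration, a minimal sketch of launching a virtual thread with those preview builds (API names as of the Loom early-access builds; they may still change):
public class VirtualThreadDemo {
    public static void main(String[] args) throws InterruptedException {
        // The virtual thread is multiplexed over a small pool of carrier (platform) threads by the JVM.
        Thread vt = Thread.startVirtualThread(() ->
                System.out.println("running in " + Thread.currentThread()));
        vt.join();
    }
}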
On the "are there any plans ..." part of the question, the answer is:
Not at this stage
The JEP list (http://openjdk.java.net/jeps/0) does not make any mention of coroutines. The list covers features added in Java 8, added or targeted for Java 9, or proposed for future releases.
Interestingly, there was an RFE submitted in March 2013 (https://bugs.openjdk.java.net/browse/JDK-8029988). The RFE only got one vote, and it was closed 9 months later with the suggestion to submit a JEP. Nobody has bothered to take the idea any further, which to me is telling.
It's synced with Java 15 build 7.
There's another choice here for Java 6+: a pythonic coroutine implementation.
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
class CorRunRAII {
private final List<WeakReference<? extends CorRun>> resources = new ArrayList<>();
public CorRunRAII add(CorRun resource) {
if (resource == null) {
return this;
}
resources.add(new WeakReference<>(resource));
return this;
}
public CorRunRAII addAll(List<? extends CorRun> arrayList) {
if (arrayList == null) {
return this;
}
for (CorRun corRun : arrayList) {
add(corRun);
}
return this;
}
@Override
protected void finalize() throws Throwable {
super.finalize();
for (WeakReference<? extends CorRun> corRunWeakReference : resources) {
CorRun corRun = corRunWeakReference.get();
if (corRun != null) {
corRun.stop();
}
}
}
}
class CorRunYieldReturn<ReceiveType, YieldReturnType> {
public final AtomicReference<ReceiveType> receiveValue;
public final LinkedBlockingDeque<AtomicReference<YieldReturnType>> yieldReturnValue;
CorRunYieldReturn(AtomicReference<ReceiveType> receiveValue, LinkedBlockingDeque<AtomicReference<YieldReturnType>> yieldReturnValue) {
this.receiveValue = receiveValue;
this.yieldReturnValue = yieldReturnValue;
}
}
interface CorRun<ReceiveType, YieldReturnType> extends Runnable, Callable<YieldReturnType> {
boolean start();
void stop();
void stop(final Throwable throwable);
boolean isStarted();
boolean isEnded();
Throwable getError();
ReceiveType getReceiveValue();
void setResultForOuter(YieldReturnType resultForOuter);
YieldReturnType getResultForOuter();
YieldReturnType receive(ReceiveType value);
ReceiveType yield();
ReceiveType yield(YieldReturnType value);
<TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(final CorRun<TargetReceiveType, TargetYieldReturnType> another);
<TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(final CorRun<TargetReceiveType, TargetYieldReturnType> another, final TargetReceiveType value);
}
abstract class CorRunSync<ReceiveType, YieldReturnType> implements CorRun<ReceiveType, YieldReturnType> {
private ReceiveType receiveValue;
public final List<WeakReference<CorRun>> potentialChildrenCoroutineList = new ArrayList<>();
// Outside
private AtomicBoolean isStarted = new AtomicBoolean(false);
private AtomicBoolean isEnded = new AtomicBoolean(false);
private Throwable error;
private YieldReturnType resultForOuter;
@Override
public boolean start() {
boolean isStarted = this.isStarted.getAndSet(true);
if ((! isStarted)
&& (! isEnded())) {
receive(null);
}
return isStarted;
}
@Override
public void stop() {
stop(null);
}
@Override
public void stop(Throwable throwable) {
isEnded.set(true);
if (throwable != null) {
error = throwable;
}
for (WeakReference<CorRun> weakReference : potentialChildrenCoroutineList) {
CorRun child = weakReference.get();
if (child != null) {
child.stop();
}
}
}
@Override
public boolean isStarted() {
return isStarted.get();
}
@Override
public boolean isEnded() {
return isEnded.get();
}
@Override
public Throwable getError() {
return error;
}
@Override
public ReceiveType getReceiveValue() {
return receiveValue;
}
@Override
public void setResultForOuter(YieldReturnType resultForOuter) {
this.resultForOuter = resultForOuter;
}
@Override
public YieldReturnType getResultForOuter() {
return resultForOuter;
}
@Override
public synchronized YieldReturnType receive(ReceiveType value) {
receiveValue = value;
run();
return getResultForOuter();
}
@Override
public ReceiveType yield() {
return yield(null);
}
@Override
public ReceiveType yield(YieldReturnType value) {
resultForOuter = value;
return receiveValue;
}
@Override
public <TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(CorRun<TargetReceiveType, TargetYieldReturnType> another) {
return yieldFrom(another, null);
}
@Override
public <TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(CorRun<TargetReceiveType, TargetYieldReturnType> another, TargetReceiveType value) {
if (another == null || another.isEnded()) {
throw new RuntimeException("Call null or isEnded coroutine");
}
potentialChildrenCoroutineList.add(new WeakReference<CorRun>(another));
synchronized (another) {
boolean isStarted = another.start();
boolean isJustStarting = ! isStarted;
if (isJustStarting && another instanceof CorRunSync) {
return another.getResultForOuter();
}
return another.receive(value);
}
}
@Override
public void run() {
try {
this.call();
}
catch (Exception e) {
e.printStackTrace();
stop(e);
return;
}
}
}
abstract class CorRunThread<ReceiveType, YieldReturnType> implements CorRun<ReceiveType, YieldReturnType> {
private final ExecutorService childExecutorService = newExecutorService();
private ExecutorService executingOnExecutorService;
private static final CorRunYieldReturn DUMMY_COR_RUN_YIELD_RETURN = new CorRunYieldReturn(new AtomicReference<>(null), new LinkedBlockingDeque<AtomicReference>());
private final CorRun<ReceiveType, YieldReturnType> self;
public final List<WeakReference<CorRun>> potentialChildrenCoroutineList;
private CorRunYieldReturn<ReceiveType, YieldReturnType> lastCorRunYieldReturn;
private final LinkedBlockingDeque<CorRunYieldReturn<ReceiveType, YieldReturnType>> receiveQueue;
// Outside
private AtomicBoolean isStarted = new AtomicBoolean(false);
private AtomicBoolean isEnded = new AtomicBoolean(false);
private Future<YieldReturnType> future;
private Throwable error;
private final AtomicReference<YieldReturnType> resultForOuter = new AtomicReference<>();
CorRunThread() {
executingOnExecutorService = childExecutorService;
receiveQueue = new LinkedBlockingDeque<>();
potentialChildrenCoroutineList = new ArrayList<>();
self = this;
}
@Override
public void run() {
try {
self.call();
}
catch (Exception e) {
stop(e);
return;
}
stop();
}
@Override
public abstract YieldReturnType call();
@Override
public boolean start() {
return start(childExecutorService);
}
protected boolean start(ExecutorService executorService) {
boolean isStarted = this.isStarted.getAndSet(true);
if (!isStarted) {
executingOnExecutorService = executorService;
future = (Future<YieldReturnType>) executingOnExecutorService.submit((Runnable) self);
}
return isStarted;
}
@Override
public void stop() {
stop(null);
}
@Override
public void stop(final Throwable throwable) {
if (throwable != null) {
error = throwable;
}
isEnded.set(true);
returnYieldValue(null);
// Do this to make sure the coroutine has checked isEnded() after getting a dummy value
receiveQueue.offer(DUMMY_COR_RUN_YIELD_RETURN);
for (WeakReference<CorRun> weakReference : potentialChildrenCoroutineList) {
CorRun child = weakReference.get();
if (child != null) {
if (child instanceof CorRunThread) {
((CorRunThread)child).tryStop(childExecutorService);
}
}
}
childExecutorService.shutdownNow();
}
protected void tryStop(ExecutorService executorService) {
if (this.executingOnExecutorService == executorService) {
stop();
}
}
@Override
public boolean isEnded() {
return isEnded.get() || (
future != null && (future.isCancelled() || future.isDone())
);
}
@Override
public boolean isStarted() {
return isStarted.get();
}
public Future<YieldReturnType> getFuture() {
return future;
}
@Override
public Throwable getError() {
return error;
}
@Override
public void setResultForOuter(YieldReturnType resultForOuter) {
this.resultForOuter.set(resultForOuter);
}
@Override
public YieldReturnType getResultForOuter() {
return this.resultForOuter.get();
}
@Override
public YieldReturnType receive(ReceiveType value) {
LinkedBlockingDeque<AtomicReference<YieldReturnType>> yieldReturnValue = new LinkedBlockingDeque<>();
offerReceiveValue(value, yieldReturnValue);
try {
AtomicReference<YieldReturnType> takeValue = yieldReturnValue.take();
return takeValue == null ? null : takeValue.get();
} catch (InterruptedException e) {
e.printStackTrace();
}
return null;
}
@Override
public ReceiveType yield() {
return yield(null);
}
@Override
public ReceiveType yield(final YieldReturnType value) {
returnYieldValue(value);
return getReceiveValue();
}
@Override
public <TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(final CorRun<TargetReceiveType, TargetYieldReturnType> another) {
return yieldFrom(another, null);
}
@Override
public <TargetReceiveType, TargetYieldReturnType> TargetYieldReturnType yieldFrom(final CorRun<TargetReceiveType, TargetYieldReturnType> another, final TargetReceiveType value) {
if (another == null || another.isEnded()) {
throw new RuntimeException("Call null or isEnded coroutine");
}
boolean isStarted = false;
potentialChildrenCoroutineList.add(new WeakReference<CorRun>(another));
synchronized (another) {
if (another instanceof CorRunThread) {
isStarted = ((CorRunThread)another).start(childExecutorService);
}
else {
isStarted = another.start();
}
boolean isJustStarting = ! isStarted;
if (isJustStarting && another instanceof CorRunSync) {
return another.getResultForOuter();
}
TargetYieldReturnType send = another.receive(value);
return send;
}
}
@Override
public ReceiveType getReceiveValue() {
setLastCorRunYieldReturn(takeLastCorRunYieldReturn());
return lastCorRunYieldReturn.receiveValue.get();
}
protected void returnYieldValue(final YieldReturnType value) {
CorRunYieldReturn<ReceiveType, YieldReturnType> corRunYieldReturn = lastCorRunYieldReturn;
if (corRunYieldReturn != null) {
corRunYieldReturn.yieldReturnValue.offer(new AtomicReference<>(value));
}
}
protected void offerReceiveValue(final ReceiveType value, LinkedBlockingDeque<AtomicReference<YieldReturnType>> yieldReturnValue) {
receiveQueue.offer(new CorRunYieldReturn(new AtomicReference<>(value), yieldReturnValue));
}
protected CorRunYieldReturn<ReceiveType, YieldReturnType> takeLastCorRunYieldReturn() {
try {
return receiveQueue.take();
} catch (InterruptedException e) {
e.printStackTrace();
}
return null;
}
protected void setLastCorRunYieldReturn(CorRunYieldReturn<ReceiveType,YieldReturnType> lastCorRunYieldReturn) {
this.lastCorRunYieldReturn = lastCorRunYieldReturn;
}
protected ExecutorService newExecutorService() {
return Executors.newCachedThreadPool(getThreadFactory());
}
protected ThreadFactory getThreadFactory() {
return new ThreadFactory() {
@Override
public Thread newThread(final Runnable runnable) {
Thread thread = new Thread(runnable);
thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread thread, Throwable throwable) {
throwable.printStackTrace();
if (runnable instanceof CorRun) {
CorRun self = (CorRun) runnable;
self.stop(throwable);
thread.interrupt();
}
}
});
return thread;
}
};
}
}
Now you can use pythonic coroutines in this way (e.g., Fibonacci numbers).
Thread Version:
class Fib extends CorRunThread<Integer, Integer> {
@Override
public Integer call() {
Integer times = getReceiveValue();
do {
int a = 1, b = 1;
for (int i = 0; times != null && i < times; i++) {
int temp = a + b;
a = b;
b = temp;
}
// A pythonic "yield", i.e., it returns `a` to the caller and waits `times` value from the next caller
times = yield(a);
} while (! isEnded());
setResultForOuter(Integer.MAX_VALUE);
return getResultForOuter();
}
}
class MainRun extends CorRunThread<String, String> {
@Override
public String call() {
// The fib coroutine will be recycled by its parent
// (there is no requirement to call its start() and stop() manually).
// Otherwise, if you want to share its instance and start/stop it manually,
// please start it before it is called by yieldFrom() and stop it at the end.
Fib fib = new Fib();
String result = "";
Integer current;
int times = 10;
for (int i = 0; i < times; i++) {
// A pythonic "yield from", i.e., it calls fib with `i` parameter and waits for returned value as `current`
current = yieldFrom(fib, i);
if (fib.getError() != null) {
throw new RuntimeException(fib.getError());
}
if (current == null) {
continue;
}
if (i > 0) {
result += ",";
}
result += current;
}
setResultForOuter(result);
return result;
}
}
Sync (non-thread) version:
class Fib extends CorRunSync<Integer, Integer> {
@Override
public Integer call() {
Integer times = getReceiveValue();
int a = 1, b = 1;
for (int i = 0; times != null && i < times; i++) {
int temp = a + b;
a = b;
b = temp;
}
yield(a);
return getResultForOuter();
}
}
class MainRun extends CorRunSync<String, String> {
@Override
public String call() {
CorRun<Integer, Integer> fib = null;
try {
fib = new Fib();
} catch (Exception e) {
e.printStackTrace();
}
String result = "";
Integer current;
int times = 10;
for (int i = 0; i < times; i++) {
current = yieldFrom(fib, i);
if (fib.getError() != null) {
throw new RuntimeException(fib.getError());
}
if (current == null) {
continue;
}
if (i > 0) {
result += ",";
}
result += current;
}
stop();
setResultForOuter(result);
if (Utils.isEmpty(result)) {
throw new RuntimeException("Error");
}
return result;
}
}
Execution (both versions will work):
// Run the entry coroutine
MainRun mainRun = new MainRun();
mainRun.start();
// Wait for mainRun ending for 5 seconds
long startTimestamp = System.currentTimeMillis();
while(!mainRun.isEnded()) {
if (System.currentTimeMillis() - startTimestamp > TimeUnit.SECONDS.toMillis(5)) {
throw new RuntimeException("Wait too much time");
}
}
// The result should be "1,1,2,3,5,8,13,21,34,55"
System.out.println(mainRun.getResultForOuter());
I have defined a filter for the termination condition of k-means.
If I run my app, it always computes only one iteration.
I think the problem is here:
DataSet<GeoTimeDataCenter> finalCentroids = loop.closeWith(newCentroids, newCentroids.join(loop).where("*").equalTo("*").filter(new MyFilter()));
or maybe the filter function:
public static final class MyFilter implements FilterFunction<Tuple2<GeoTimeDataCenter, GeoTimeDataCenter>> {
private static final long serialVersionUID = 5868635346889117617L;
public boolean filter(Tuple2<GeoTimeDataCenter, GeoTimeDataCenter> tuple) throws Exception {
if(tuple.f0.equals(tuple.f1)) {
return true;
}
else {
return false;
}
}
}
best regards,
paul
My full code is here:
public void run() {
//load properties
Properties pro = new Properties();
FileSystem fs = null;
try {
pro.load(FlinkMain.class.getResourceAsStream("/config.properties"));
fs = FileSystem.get(new URI(pro.getProperty("hdfs.namenode")),new org.apache.hadoop.conf.Configuration());
} catch (Exception e) {
e.printStackTrace();
}
int maxIteration = Integer.parseInt(pro.getProperty("maxiterations"));
String outputPath = fs.getHomeDirectory()+pro.getProperty("flink.output");
// set up execution environment
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// get input points
DataSet<GeoTimeDataTupel> points = getPointDataSet(env);
DataSet<GeoTimeDataCenter> centroids = null;
try {
centroids = getCentroidDataSet(env);
} catch (Exception e1) {
e1.printStackTrace();
}
// set number of bulk iterations for KMeans algorithm
IterativeDataSet<GeoTimeDataCenter> loop = centroids.iterate(maxIteration);
DataSet<GeoTimeDataCenter> newCentroids = points
// compute closest centroid for each point
.map(new SelectNearestCenter(this.getBenchmarkCounter())).withBroadcastSet(loop, "centroids")
// count and sum point coordinates for each centroid
.groupBy(0).reduceGroup(new CentroidAccumulator())
// compute new centroids from point counts and coordinate sums
.map(new CentroidAverager(this.getBenchmarkCounter()));
// feed new centroids back into next iteration with termination condition
DataSet<GeoTimeDataCenter> finalCentroids = loop.closeWith(newCentroids, newCentroids.join(loop).where("*").equalTo("*").filter(new MyFilter()));
DataSet<Tuple2<Integer, GeoTimeDataTupel>> clusteredPoints = points
// assign points to final clusters
.map(new SelectNearestCenter(-1)).withBroadcastSet(finalCentroids, "centroids");
// emit result
clusteredPoints.writeAsCsv(outputPath+"/points", "\n", " ");
finalCentroids.writeAsText(outputPath+"/centers");//print();
// execute program
try {
env.execute("KMeans Flink");
} catch (Exception e) {
e.printStackTrace();
}
}
public static final class MyFilter implements FilterFunction<Tuple2<GeoTimeDataCenter, GeoTimeDataCenter>> {
private static final long serialVersionUID = 5868635346889117617L;
public boolean filter(Tuple2<GeoTimeDataCenter, GeoTimeDataCenter> tuple) throws Exception {
if(tuple.f0.equals(tuple.f1)) {
return true;
}
else {
return false;
}
}
}
I think the problem is the filter function (modulo the code you haven't posted). Flink's termination criterion works the following way: The termination criterion is met if the provided termination DataSet is empty. Otherwise the next iteration is started if the maximum number of iterations has not been exceeded.
Flink's filter function keeps only those elements for which the FilterFunction returns true. Thus, with your MyFilter implementation you only keep the centroids which are identical before and after the iteration. This implies that you'll obtain an empty DataSet if all centroids have changed, and thus the iteration terminates. This is clearly the inverse of the actual termination criterion. The termination criterion should be: continue with k-means as long as there is a centroid which has changed.
You can do this with a coGroup function where you emit elements only if there is no matching centroid from the preceding centroid DataSet. This is similar to a left outer join, except that you discard the non-null matches.
public static void main(String[] args) throws Exception {
// set up the execution environment
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Element> oldDS = env.fromElements(new Element(1, "test"), new Element(2, "test"), new Element(3, "foobar"));
DataSet<Element> newDS = env.fromElements(new Element(1, "test"), new Element(3, "foobar"), new Element(4, "test"));
DataSet<Element> filtered = newDS.coGroup(oldDS).where("*").equalTo("*").with(new FilterCoGroup());
filtered.print();
}
public static class FilterCoGroup implements CoGroupFunction<Element, Element, Element> {
@Override
public void coGroup(
Iterable<Element> newElements,
Iterable<Element> oldElements,
Collector<Element> collector) throws Exception {
List<Element> persistedElements = new ArrayList<Element>();
for(Element element: oldElements) {
persistedElements.add(element);
}
for(Element newElement: newElements) {
boolean contained = false;
for(Element oldElement: persistedElements) {
if(newElement.equals(oldElement)){
contained = true;
}
}
if(!contained) {
collector.collect(newElement);
}
}
}
}
public static class Element implements Key {
private int id;
private String name;
public Element(int id, String name) {
this.id = id;
this.name = name;
}
public Element() {
this(-1, "");
}
@Override
public int hashCode() {
return 31 + 7 * name.hashCode() + 11 * id;
}
@Override
public boolean equals(Object obj) {
if(obj instanceof Element) {
Element element = (Element) obj;
return id == element.id && name.equals(element.name);
} else {
return false;
}
}
@Override
public int compareTo(Object o) {
if(o instanceof Element) {
Element element = (Element) o;
if(id == element.id) {
return name.compareTo(element.name);
} else {
return id - element.id;
}
} else {
throw new RuntimeException("Comparing incompatible types.");
}
}
@Override
public void write(DataOutputView dataOutputView) throws IOException {
dataOutputView.writeInt(id);
dataOutputView.writeUTF(name);
}
@Override
public void read(DataInputView dataInputView) throws IOException {
id = dataInputView.readInt();
name = dataInputView.readUTF();
}
@Override
public String toString() {
return "(" + id + "; " + name + ")";
}
}
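Applied to your k-means program, the same idea would replace the join/filter termination criterion roughly as follows (untested sketch; it reuses the loop and newCentroids data sets from your run() method, the ChangedCentroids class name is mine, and it assumes GeoTimeDataCenter has a proper equals()):
public static final class ChangedCentroids
        implements CoGroupFunction<GeoTimeDataCenter, GeoTimeDataCenter, GeoTimeDataCenter> {
    @Override
    public void coGroup(Iterable<GeoTimeDataCenter> newCentroids,
                        Iterable<GeoTimeDataCenter> oldCentroids,
                        Collector<GeoTimeDataCenter> collector) throws Exception {
        // Remember the centroids of the previous iteration that fall into this group.
        List<GeoTimeDataCenter> old = new ArrayList<GeoTimeDataCenter>();
        for (GeoTimeDataCenter centroid : oldCentroids) {
            old.add(centroid);
        }
        // Emit a new centroid only if the previous iteration has no identical one,
        // so the termination DataSet stays non-empty as long as at least one centroid changed.
        for (GeoTimeDataCenter centroid : newCentroids) {
            if (!old.contains(centroid)) {
                collector.collect(centroid);
            }
        }
    }
}
// In run(), instead of the join/filter combination:
DataSet<GeoTimeDataCenter> changedCentroids = newCentroids
        .coGroup(loop).where("*").equalTo("*").with(new ChangedCentroids());
DataSet<GeoTimeDataCenter> finalCentroids = loop.closeWith(newCentroids, changedCentroids);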
Run the main function in File2. The problem is that the threads get stuck at "rval=MVEL.executeExpression(compiledExpression, vars);": the 10 threads run in sequential order, not in parallel, and I want to know why this happens.
PS: I'm using MVEL 2.2, the latest version.
File1: MVELHelper.java
public class MVELHelper {
private static ParserContext _ctx = new ParserContext(false);
//public static Object execute(String expression, Map<String, Object> vars, Databus databus) throws Exception {
public static Object execute(String expression, Map<String, Object> vars) throws Exception {
Object rval = null;
try {
if(vars == null) {
rval = MVEL.eval(expression, new HashMap<String,Object>());
}
else {
rval = MVEL.eval(expression, vars);
}
return rval;
}
catch(Exception e) {
throw new Exception("MVEL FAILED:"+expression,e);
}
}
public static Serializable compile(String text, ParserContext ctx)
throws Exception {
if(ctx == null) {
//ctx = _ctx;
ctx=new ParserContext(false);
}
Serializable exp = null;
try {
exp = MVEL.compileExpression(text, ctx);
//exp = MVEL.compileExpression(text);
}
catch (Exception e) {
throw new Exception("failed to compile expression.", e);
}
return exp;
}
public static Object compileAndExecute(String expression, Map<String, Object> vars) throws Exception {
Object rval = null;
try {
Serializable compiledExpression=compile(expression,null);
System.out.println("[COMPILE OVER, Thread Id="+Thread.currentThread().getId()+"] ");
if(vars == null) {
rval=MVEL.executeExpression(compiledExpression, new HashMap<String,Object>());
//rval = MVEL.eval(exp, new HashMap<String,Object>());
}
else {
//rval=MVEL.executeExpression(compiledExpression, vars,(VariableResolverFactory)null);
rval=MVEL.executeExpression(compiledExpression, vars);
//rval = MVEL.eval(expression, vars);
}
return rval;
}
catch(Exception e) {
throw new Exception("MVEL FAILED:"+expression,e);
}
}
}
File2: ExecThread3.java
public class ExecThread3 implements Runnable{
Map dataMap=null;
public Map getDataMap() {
return dataMap;
}
public void setDataMap(Map dataMap) {
this.dataMap = dataMap;
}
@Override
public void run() {
Map varsMap = new HashMap();
Map dataMap=new HashMap();
dataMap.put("count",100);
varsMap.put("dataMap", dataMap);
String expression="System.out.println(\"[BEFORE Thread Id=\"+Thread.currentThread().getId()+\"] \"+dataMap.get(\"count\"));"+
"Thread.sleep(3000);"+
"System.err.println(\"[AFTER Thread Id=\"+Thread.currentThread().getId()+\"] \"+dataMap.get(\"count\"));";
try {
//MVEL.compileExpression(expression);
MVELHelper.compileAndExecute(expression, varsMap);
}
catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public static void main(String[] args) {
for(int k=0;k<10;k++){
ExecThread3 execThread=new ExecThread3();
new Thread(execThread).start();
}
}
}
So, I'm working on a plugin at work and I've run into a situation where I could use a ContentProposalAdapter to my benefit. Basically, a person will start typing someone's name and then a list of names matching the current query will be returned in a type-ahead manner (a la Google). So, I created a class implementing IContentProposalProvider which, upon calling its getProposals() method, fires off a thread which handles getting the proposals in the background. The problem I am having is that I run into a race condition, where the proposals are fetched via HTTP in the background but I try to read them before they have actually been retrieved.
Now, I'm trying not to run into an issue of Thread hell, and that isn't getting me very far anyway. So, here is what I've done so far. Does anyone have any suggestions as to what I can do?
public class ProfilesProposalProvider implements IContentProposalProvider, PropertyChangeListener {
private IContentProposal[] props;
@Override
public IContentProposal[] getProposals(String arg0, int arg1) {
Display display = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell().getDisplay();
RunProfilesJobThread t1 = new RunProfilesJobThread(arg0, display);
t1.run();
return props;
}
@Override
public void propertyChange(PropertyChangeEvent arg0) {
if (arg0.getSource() instanceof RunProfilesJobThread){
RunProfilesJobThread thread = (RunProfilesJobThread)arg0.getSource();
props = thread.getProps();
}
}
}
public class RunProfilesJobThread extends Thread {
private ProfileProposal[] props;
private Display display;
private String query;
public RunProfilesJobThread(String query, Display display){
this.query = query;
}
@Override
public void run() {
if (!(query.equals(""))){
GetProfilesJob job = new GetProfilesJob("profiles", query);
job.schedule();
try {
job.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
GetProfilesJobInfoThread thread = new GetProfilesJobInfoThread(job.getResults());
try {
thread.join();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
props = thread.getProps();
}
}
public ProfileProposal[] getProps(){
return props;
}
}
public class GetProfilesJobInfoThread extends Thread {
private ArrayList<String> names;
private ProfileProposal[] props;
public GetProfilesJobInfoThread(ArrayList<String> names){
this.names = names;
}
@Override
public void run() {
if (names != null){
props = new ProfileProposal[names.size()];
for (int i = 0; i < props.length - 1; i++){
ProfileProposal temp = new ProfileProposal(names.get(i), names.get(i));
props[i] = temp;
}
}
}
public ProfileProposal[] getProps(){
return props;
}
}
OK, I'll try it...
I haven't tried to run it, but it should work more or less. At least it's a good start. If you have any questions, feel free to ask.
public class ProfilesProposalProvider implements IContentProposalProvider {
private List<IContentProposal> proposals;
private String proposalQuery;
private Thread retrievalThread;
public void setProposals( List<IContentProposal> proposals, String query ) {
synchronized( this ) {
this.proposals = proposals;
this.proposalQuery = query;
}
}
public IContentProposal[] getProposals( String contents, int position ) {
// Synchronize incoming thread and retrieval thread, so that the proposal list
// is not replaced while we're processing it.
synchronized( this ) {
/**
* Get proposals if the query is longer than one char and the current list of proposals was retrieved with a
* different prefix than the new query, and only if the current retrieval thread is finished.
*/
if ( retrievalThread == null && contents.length() > 1 && ( proposals == null || !contents.startsWith( proposalQuery ) ) ) {
getProposals( contents );
}
/**
* Select valid proposals from retrieved list.
*/
if ( proposals != null ) {
List<IContentProposal> validProposals = new ArrayList<IContentProposal>();
for ( IContentProposal prop : proposals ) {
if(prop == null) {
continue;
}
String propVal = prop.getContent();
if ( isProposalValid( propVal, contents )) {
validProposals.add( prop );
}
}
return validProposals.toArray( new IContentProposal[ validProposals.size() ] );
}
}
return new IContentProposal[0];
}
protected void getProposals( final String query ) {
retrievalThread = new Thread() {
@Override
public void run() {
GetProfilesJob job = new GetProfilesJob("profiles", query);
job.schedule();
try {
job.join();
ArrayList<String> names = job.getResults();
if (names != null){
List<IContentProposal> props = new ArrayList<IContentProposal>();
for ( String name : names ) {
props.add( new ProfileProposal( name, name ) );
}
setProposals( props, query );
}
} catch (InterruptedException e) {
e.printStackTrace();
}
retrievalThread = null;
}
};
retrievalThread.start();
}
protected boolean isProposalValid( String proposalValue, String contents ) {
return ( proposalValue.length() >= contents.length() && proposalValue.substring(0, contents.length()).equalsIgnoreCase(contents));
}
}