I have a simple ECHO server and client written using Netty. The server and client are on the same machine. I was expecting a mean latency on the order of a couple of milliseconds; however, regardless of what I try, I can never bring the latency down to sub-millisecond durations. Any help would be greatly appreciated.
Update: even when using System.nanoTime() I see latencies of around 25-30 ms.
EchoClient
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.*;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
import org.jboss.netty.handler.execution.ExecutionHandler;
import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
public class EchoClient {
public static void main(String[] args) {
if (args.length != 1) {
System.err.println(String.format("usage: %s <num-msgs>", EchoClient.class.getCanonicalName()));
System.exit(1);
}
final long NUM_MSGS = Integer.parseInt(args[0]);
final EchoClientHandler echoClientHandler = new EchoClientHandler();
final ExecutionHandler e =
new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor(4, 128 * 1024L, 128 * 1024L));
ChannelFactory factory =
new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
Executors.newCachedThreadPool());
ClientBootstrap bootstrap = new ClientBootstrap(factory);
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(new TestPayloadEncoder(),
new TestPayloadDecoder(),
e,
echoClientHandler);
}
});
bootstrap.setOption("tcpNoDelay", true);
bootstrap.setOption("keepAlive", false);
bootstrap.setOption("child.keepAlive", false);
bootstrap.setOption("sendBufferSize", 128 * 1024L);
bootstrap.setOption("receiveBufferSize", 128 * 1024L);
for (int i = 0; i < NUM_MSGS; i++) {
final InetSocketAddress serverAddr =
new InetSocketAddress("localhost", 8080);
bootstrap.connect(serverAddr).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f) throws Exception {
if (f.isSuccess()) {
f.getChannel().write(new TestPayload());
}
}
});
}
while (echoClientHandler.numMsgs.get() < NUM_MSGS);
System.out.println(echoClientHandler.numMsgs);
System.out.println(echoClientHandler.aggTime);
System.out.println(String.format("mean transfer time: %.2fms",
((float) echoClientHandler.aggTime.get()) /
echoClientHandler.numMsgs.get()));
System.out.flush();
e.releaseExternalResources();
factory.releaseExternalResources();
}
}
EchoClientHandler
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
import java.util.concurrent.atomic.AtomicLong;
public class EchoClientHandler extends SimpleChannelHandler {
public final AtomicLong numMsgs = new AtomicLong(0);
public final AtomicLong aggTime = new AtomicLong(0);
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
long recvTime = System.currentTimeMillis();
TestPayload m = (TestPayload) e.getMessage();
aggTime.addAndGet(recvTime - m.getTime());
numMsgs.incrementAndGet();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
e.getCause().printStackTrace();
e.getChannel().close();
}
}
EchoServer
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.execution.ExecutionHandler;
import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
public class EchoServer {
public static void main(String[] args) {
ChannelFactory factory =
new NioServerSocketChannelFactory(Executors.newFixedThreadPool(4),
Executors.newFixedThreadPool(32),
32);
ServerBootstrap bootstrap = new ServerBootstrap(factory);
final ExecutionHandler e =
new ExecutionHandler(new OrderedMemoryAwareThreadPoolExecutor(4, 128 * 1024L, 128 * 1024L));
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(e, new EchoServerHandler());
}
});
bootstrap.setOption("reuseAddr", true);
bootstrap.setOption("keepAlive", false);
bootstrap.setOption("child.reuseAddr", true);
bootstrap.setOption("child.soLinger", 0);
bootstrap.setOption("child.keepAlive", false);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.sendBufferSize", 128 * 1024L);
bootstrap.setOption("child.receiveBufferSize", 128 * 1024L);
bootstrap.bind(new InetSocketAddress("localhost", 8080));
}
}
EchoServerHandler
import org.jboss.netty.channel.*;
public class EchoServerHandler extends SimpleChannelHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
e.getChannel().write(e.getMessage());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
e.getCause().printStackTrace();
e.getChannel().close();
}
}
TestPayload
import org.jboss.netty.buffer.ChannelBuffer;
import java.util.Date;
import java.util.Random;
public class TestPayload {
private static final int PREAMBLE_LEN = (Long.SIZE + Integer.SIZE) / 8;
private static final Random RNG;
static {
RNG = new Random();
RNG.setSeed(new Date().getTime());
}
private final int paddingLen;
private final byte[] padding;
private final long time;
public TestPayload() {
this(65536);
}
public TestPayload(int sizeInBytes) {
this.paddingLen = sizeInBytes;
this.padding = new byte[this.paddingLen];
RNG.nextBytes(this.padding);
this.time = System.currentTimeMillis();
}
private TestPayload(long time, int paddingLen, byte[] padding) {
this.paddingLen = paddingLen;
this.padding = padding;
this.time = time;
}
public long getTime() {
return this.time;
}
public void writeTo(ChannelBuffer buf) {
buf.writeLong(this.time);
buf.writeInt(this.paddingLen);
buf.writeBytes(this.padding);
}
public static TestPayload readFrom(ChannelBuffer buf) {
if (buf.readableBytes() < PREAMBLE_LEN) {
return null;
}
buf.markReaderIndex();
long time = buf.readLong();
int paddingLen = buf.readInt();
if (buf.readableBytes() < paddingLen) {
buf.resetReaderIndex();
return null;
}
byte[] padding = new byte[paddingLen];
buf.readBytes(padding);
return new TestPayload(time, paddingLen, padding);
}
public int getLength() {
return PREAMBLE_LEN + this.paddingLen;
}
}
Are you running your client and your server in different JVMs? If so, measuring time across JVM boundaries is not as straightforward as you might think. For example, using System.nanoTime() is not necessarily going to work, according to the Oracle Java docs:
The values returned by this method become meaningful only when the difference between two such values, obtained within the same instance of a Java virtual machine, is computed.
Assuming you can find a reliable way to measure time across JVMs, and if your goal is to isolate how long it takes a Netty client to send to a Netty server, then simplify your use case as much as possible. For example, in the above code you are timing the send/receive of a 65536-byte array; remove that from the timing experiment to help isolate where the bottlenecks are.
How many runs are you collecting timing from? Are you excluding the initialization time of Netty itself (for example, by running a few messages between client and server before taking timings)?
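For illustration, client-side-only timing with a warm-up could look roughly like the sketch below. The handler name, the WARMUP_MSGS constant, and the switch to nanoseconds are assumptions on my part, and TestPayload would have to store a System.nanoTime() value taken just before the write so that both timestamps come from the client JVM:
import java.util.concurrent.atomic.AtomicLong;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
// Sketch only: round-trip time measured entirely on the client side.
public class TimedEchoClientHandler extends SimpleChannelHandler {
    private static final long WARMUP_MSGS = 1000; // ignore the first N round trips (JIT, connection setup)
    public final AtomicLong numMsgs = new AtomicLong(0);
    public final AtomicLong aggNanos = new AtomicLong(0);
    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        long recvNanos = System.nanoTime();
        TestPayload m = (TestPayload) e.getMessage();
        if (numMsgs.incrementAndGet() > WARMUP_MSGS) {
            // Meaningful only if getTime() returns a nanoTime value captured in this same JVM.
            aggNanos.addAndGet(recvNanos - m.getTime());
        }
    }
}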
Also, how does adjusting your configuration impact performance? There are plenty of knobs to tweak (thread pool size, send/receive buffer sizes, etc.).
What version of Netty are you using, and is there an option to force a flush after you write?
I don't see the code for EchoClient. It looks like you copy/pasted the code for EchoClientHandler where EchoClient's code should be.
Related
I'm writing a tool that will generate a large number of HTTP calls against a web server. At the moment I'm interested in how many requests I can make per second; I'm not interested in the results of those requests.
I'm measuring the time spent sending 1k requests to google.com and I get 69 milliseconds:
but when I sniff the traffic with Wireshark I see that sending all the GET requests takes almost 4 seconds:
[Wireshark screenshot: start of the calls]
[Wireshark screenshot: end of the calls]
The tool was run from IntelliJ on Windows 10, i7 1.8 GHz, 32 GB of RAM.
My question is: why is there this difference? Sending 1k HTTP GET requests should be quick, yet it takes almost 4 seconds. What am I doing wrong here?
The code below is only for testing purposes and it's quite ugly, so bear with me. Also, I'm not very familiar with NIO.
import org.apache.commons.lang3.time.StopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicInteger;
public class UJPPHighTrafficClient {
public static final Logger logger = LoggerFactory.getLogger(UJPPHighTrafficClient.class);
public static final int iterations = 1000;
public static void main(String[] args) {
doStartClient();
}
private static void doStartClient() {
logger.info("starting the client");
UJPPHighTrafficExecutor executor = new UJPPHighTrafficExecutor();
StopWatch watch = new StopWatch();
watch.start();
for (int i = 0; i < iterations; i++) {
executor.run();
}
watch.stop();
logger.info("Run " + iterations + " executions in " + watch.getTime() + " milliseconds");
}
}
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.config.ConnectionConfig;
import org.apache.http.impl.nio.DefaultHttpClientIODispatch;
import org.apache.http.impl.nio.pool.BasicNIOConnPool;
import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.message.BasicHttpEntityEnclosingRequest;
import org.apache.http.nio.protocol.*;
import org.apache.http.nio.reactor.ConnectingIOReactor;
import org.apache.http.nio.reactor.IOEventDispatch;
import org.apache.http.nio.reactor.IOReactorException;
import org.apache.http.protocol.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.atomic.AtomicInteger;
public class UJPPHighTrafficExecutor {
private final Logger logger = LoggerFactory.getLogger("debug");
public static ConnectingIOReactor requestsReactor = null;
private static BasicNIOConnPool clientConnectionPool = null;
public static HttpAsyncRequester clientRequester = null;
public static Thread runnerThread = null;
private static AtomicInteger counter = null;
public static final int cores = Runtime.getRuntime().availableProcessors() * 2;
public UJPPHighTrafficExecutor() {
counter = new AtomicInteger();
counter.set(0);
initializeConnectionManager();
}
public void initializeConnectionManager() {
try {
requestsReactor =
new DefaultConnectingIOReactor(IOReactorConfig.
custom().
setIoThreadCount(cores).
build());
clientConnectionPool = new BasicNIOConnPool(requestsReactor, ConnectionConfig.DEFAULT);
clientConnectionPool.setDefaultMaxPerRoute(cores);
clientConnectionPool.setMaxTotal(100);
clientRequester = initializeHttpClient(requestsReactor);
} catch (IOReactorException ex) {
logger.error(" initializeConnectionManager " + ex.getMessage());
}
}
private HttpAsyncRequester initializeHttpClient(final ConnectingIOReactor ioReactor) {
// Create HTTP protocol processing chain
HttpProcessor httpproc = HttpProcessorBuilder.create()
// Use standard client-side protocol interceptors
.add(new RequestContent(true)).
add(new RequestTargetHost()).
add(new RequestConnControl())
.add(new RequestExpectContinue(true)).
build();
// Create HTTP requester
HttpAsyncRequester requester = new HttpAsyncRequester(httpproc);
// Create client-side HTTP protocol handler
HttpAsyncRequestExecutor protocolHandler = new HttpAsyncRequestExecutor();
// Create client-side I/O event dispatch
final IOEventDispatch ioEventDispatch =
new DefaultHttpClientIODispatch(protocolHandler, ConnectionConfig.DEFAULT);
// Run the I/O reactor in a separate thread
runnerThread = new Thread("Client") {
@Override
public void run() {
try {
ioReactor.execute(ioEventDispatch);
} catch (InterruptedIOException ex) {
logger.error("Interrupted", ex);
} catch (IOException e) {
logger.error("I/O error", e);
} catch (Exception e) {
logger.error("Exception encountered in Client ", e.getMessage(), e);
}
logger.info("Client shutdown");
}
};
runnerThread.start();
return requester;
}
public void run() {
HttpHost httpHost = new HttpHost("google.com", 80, "http");
final HttpCoreContext coreContext = HttpCoreContext.create();
ProtocolVersion ver = new ProtocolVersion("HTTP", 1, 1);
BasicHttpEntityEnclosingRequest request = new BasicHttpEntityEnclosingRequest("GET", "/", ver);
clientRequester.execute(new BasicAsyncRequestProducer(httpHost, request), new BasicAsyncResponseConsumer(),
clientConnectionPool, coreContext,
// Handle HTTP response from a callback
new FutureCallback<HttpResponse>() {
@Override
public void completed(final HttpResponse response) {
logger.info("Completed " + response.toString());
checkCounter();
}
@Override
public void failed(final Exception ex) {
logger.info("Failed " + ex.getMessage());
checkCounter();
}
@Override
public void cancelled() {
logger.info("Cancelled ");
checkCounter();
}
});
}
private void checkCounter() {
counter.set(counter.get() + 1);
if (counter.get() == UJPPHighTrafficClient.iterations) {
try {
requestsReactor.shutdown();
} catch (Exception ex) {
}
}
}
}
Your code is timing how long it takes to set up 1000 iterations of the HTTP connection, not the time to complete those connections, many of which are still running 3-4 seconds later. To see a more accurate figure, add a field t0 to UJPPHighTrafficExecutor:
public class UJPPHighTrafficExecutor {
long t0 = System.nanoTime();
...and then checkCounter() can print a time for completing all iterations:
private void checkCounter() {
counter.set(counter.get() + 1);
if (counter.get() == UJPPHighTrafficClient.iterations) {
try {
requestsReactor.shutdown();
} catch (Exception ex) {
}
long t1 = System.nanoTime();
System.out.println("ELAPSED MILLIS: ~"+TimeUnit.NANOSECONDS.toMillis(t1-t0));
}
}
This will print a much larger number for 1000 iterations:
ELAPSED MILLIS: ~xxxx
Note that counter.set(counter.get() + 1) is not a safe way to increment an AtomicInteger; remove that line and do the increment inside the if statement instead:
if (counter.incrementAndGet() == UJPPHighTrafficClient.iterations)
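Putting both suggestions together (and adding an import for java.util.concurrent.TimeUnit), checkCounter() ends up roughly like this:
private void checkCounter() {
    if (counter.incrementAndGet() == UJPPHighTrafficClient.iterations) {
        try {
            requestsReactor.shutdown();
        } catch (Exception ex) {
            // ignored, as in the original code
        }
        long t1 = System.nanoTime();
        System.out.println("ELAPSED MILLIS: ~" + TimeUnit.NANOSECONDS.toMillis(t1 - t0));
    }
}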
As in several research papers [1], [2], [3], it seems appropriate to use the Principal Component Analysis (PCA) algorithm for feature extraction. To attempt this, I have written a traffic sniffer in Java using the jpcap library (snippet below), which extracts various features from live network traffic.
However, I have not found any way to actually implement the PCA step, as existing approaches (snippets below) all seem to operate on non-live, pre-organised datasets such as ARFF files.
I have seen a number of research papers [4], [5], [6] that reference the use of the PCA algorithm in combination with the jpcap library; that said, I have not been able to find an explanation of how this was accomplished.
To be clear:
My question is: how can I implement PCA (using any approach) in Java to extract features from live packets (obtained via the jpcap library referenced above), as others have accomplished (some examples are referenced above)?
Sniffer Example (using Jpcap Library)
Sniffer.java
import jpcap.JpcapCaptor;
import jpcap.NetworkInterface;
import jpcap.packet.Packet;
import jpcap.*;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.bind.DatatypeConverter;
import java.util.List;
public class Sniffer {
public static NetworkInterface[] NETWORK_INTERFACES;
public static JpcapCaptor CAP;
jpcap_thread THREAD;
public static int INDEX = 0;
public static int flag = 0;
public static int COUNTER = 0;
static boolean CaptureState = false;
public static int No = 0;
JpcapWriter writer = null;
List<Packet> packetList = new ArrayList<>();
public static ArrayList<Object[]> packetInfo = new ArrayList<>();
public static void CapturePackets() {
THREAD = new jpcap_thread() {
public Object construct() {
try {
CAP = JpcapCaptor.openDevice(NETWORK_INTERFACES[INDEX], 65535, false, 20);
// writer = JpcapWriter.openDumpFile(CAP, "captureddata");
if ("UDP".equals(filter_options.getSelectedItem().toString())) {
CAP.setFilter("udp", true);
} else if ("TCP".equals(filter_options.getSelectedItem().toString())) {
CAP.setFilter("tcp", true);
} else if ("ICMP".equals(filter_options.getSelectedItem().toString())) {
CAP.setFilter("icmp", true);
}
while (CaptureState) {
CAP.processPacket(1, new PacketContents());
packetList.add(CAP.getPacket());
}
CAP.close();
} catch (Exception e) {
System.out.print(e);
}
return 0;
}
public void finished() {
this.interrupt();
}
};
THREAD.start();
}
public static void main(String[] args) {
CaptureState = true;
CapturePackets();
}
public void saveToFile() {
THREAD = new jpcap_thread() {
public Object construct() {
writer = null;
try {
CAP = JpcapCaptor.openDevice(NETWORK_INTERFACES[INDEX], 65535, false, 20);
writer = JpcapWriter.openDumpFile(CAP, "captured_data.txt");
} catch (IOException ex) {
Logger.getLogger(Sniffer.class.getName()).log(Level.SEVERE, null, ex);
}
for (int i = 0; i < No; i++) {
writer.writePacket(packetList.get(i));
}
return 0;
}
public void finished() {
this.interrupt();
}
};
THREAD.start();
}
}
PacketContents.java
import jpcap.PacketReceiver;
import jpcap.packet.Packet;
import javax.swing.table.DefaultTableModel;
import jpcap.packet.TCPPacket;
import jpcap.packet.UDPPacket;
import java.util.ArrayList;
import java.util.List;
import jpcap.packet.ICMPPacket;
public class PacketContents implements PacketReceiver {
public static TCPPacket tcp;
public static UDPPacket udp;
public static ICMPPacket icmp;
public void recievePacket(Packet packet) {
}
@Override
public void receivePacket(Packet packet) {
if (packet instanceof TCPPacket) {
tcp = (TCPPacket) packet;
Sniffer.packetInfo.add(new Object[] { Sniffer.No, tcp.length, tcp.src_ip, tcp.dst_ip, "TCP", tcp.src_port,
tcp.dst_port, tcp.ack, tcp.ack_num, tcp.data, tcp.sequence, tcp.offset, tcp.header });
Sniffer.No++;
} else if (packet instanceof UDPPacket) {
udp = (UDPPacket) packet;
Sniffer.packetInfo.add(new Object[] { Sniffer.No, udp.length, udp.src_ip, udp.dst_ip, "UDP", udp.src_port,
udp.dst_port, udp.data, udp.offset, udp.header });
Sniffer.No++;
} else if (packet instanceof ICMPPacket) {
icmp = (ICMPPacket) packet;
Sniffer.packetInfo.add(new Object[] { Sniffer.No, icmp.length, icmp.src_ip, icmp.dst_ip, "ICMP",
icmp.checksum, icmp.header, icmp.offset, icmp.orig_timestamp, icmp.recv_timestamp,
icmp.trans_timestamp, icmp.data });
Sniffer.No++;
}
}
}
Using the Weka Library to execute the PCA algorithm on an ARFF file
WekaPCA.java
package project;
import weka.core.Instances;
import weka.core.converters.ArffLoader;
import weka.core.converters.ConverterUtils;
import weka.core.converters.TextDirectoryLoader;
import java.io.File;
import org.math.plot.FrameView;
import org.math.plot.Plot2DPanel;
import org.math.plot.PlotPanel;
import org.math.plot.plots.ScatterPlot;
import weka.attributeSelection.PrincipalComponents;
import weka.attributeSelection.Ranker;
public class PCA {
public static void main(String[] args) {
try {
// Load data
String InputFilename = "kdd99.arff";
ArffLoader loader = new ArffLoader();
loader.setSource(new File(InputFilename));
Instances data = loader.getDataSet();
// Perform PCA
PrincipalComponents pca = new PrincipalComponents();
pca.setVarianceCovered(1.0);
pca.setTransformBackToOriginal(false);
pca.buildEvaluator(data);
// Show transformed data
Instances transformedData = pca.transformedData();
System.out.println(transformedData);
}
catch (Exception e) {
e.printStackTrace();
}
}
}
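For what it's worth, one way to bridge the two snippets above is to skip the ARFF file and build a weka.core.Instances object in memory from the rows the sniffer already collects in Sniffer.packetInfo, then feed it to the same PrincipalComponents evaluator. The sketch below is untested and assumes the newer Weka API (3.7+, where DenseInstance exists and transformedData takes the data set as an argument); the chosen features and class name are purely illustrative:
import java.util.ArrayList;
import weka.attributeSelection.PrincipalComponents;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class LivePacketPCA {
    public static void main(String[] args) throws Exception {
        // Declare one numeric attribute per packet feature extracted in PacketContents.
        ArrayList<Attribute> attrs = new ArrayList<>();
        attrs.add(new Attribute("length"));
        attrs.add(new Attribute("srcPort"));
        attrs.add(new Attribute("dstPort"));
        Instances data = new Instances("live-packets", attrs, 0);

        // In the real sniffer you would append one row per captured packet
        // (e.g. from receivePacket) instead of these two hard-coded examples.
        data.add(new DenseInstance(1.0, new double[] { 1514, 443, 52100 }));
        data.add(new DenseInstance(1.0, new double[] { 60, 52100, 443 }));

        // Same PCA calls as in the ARFF-based example, just fed from memory.
        PrincipalComponents pca = new PrincipalComponents();
        pca.setVarianceCovered(1.0);
        pca.buildEvaluator(data);
        System.out.println(pca.transformedData(data));
    }
}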
I'm new to Netty, and based on an example I found I wrote a Netty HTTP server that keeps HTTP connections open to send server-sent events to the browser client.
The problem is that it only accepts up to about 5 connections and after that blocks new connections. I googled and found that most answers said to set SO_BACKLOG to a higher value. I tried different values but saw no difference; I even set it to Integer.MAX_VALUE and still had only 5 connections.
Server code (Using Netty version 4.1.6.Final):
package server;
import static io.netty.buffer.Unpooled.copiedBuffer;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.HttpVersion;
public class NettyHttpServer {
private ChannelFuture channel;
private final EventLoopGroup masterGroup;
public NettyHttpServer() {
masterGroup = new NioEventLoopGroup(100);
}
public void start() {
try {
final ServerBootstrap bootstrap = new ServerBootstrap().group(masterGroup)
.channel(NioServerSocketChannel.class).childHandler(new ChannelInitializer < SocketChannel > () {
@Override
public void initChannel(final SocketChannel ch) throws Exception {
ch.pipeline().addLast("codec", new HttpServerCodec());
ch.pipeline().addLast("aggregator", new HttpObjectAggregator(512 * 1024));
ch.pipeline().addLast("request", new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(final ChannelHandlerContext ctx, final Object msg)
throws Exception {
System.out.println(msg);
registerToPubSub(ctx, msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
ctx.writeAndFlush(new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.INTERNAL_SERVER_ERROR,
copiedBuffer(cause.getMessage().getBytes())));
}
});
}
}).option(ChannelOption.SO_BACKLOG, Integer.MAX_VALUE)
.childOption(ChannelOption.SO_KEEPALIVE, true);
channel = bootstrap.bind(8081).sync();
// channels.add(bootstrap.bind(8080).sync());
} catch (final InterruptedException e) {}
}
public void shutdown() {
masterGroup.shutdownGracefully();
try {
channel.channel().closeFuture().sync();
} catch (InterruptedException e) {}
}
private void registerToPubSub(final ChannelHandlerContext ctx, Object msg) {
new Thread() {
@Override
public void run() {
while (true) {
final String responseMessage = "data:abcdef\n\n";
FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
copiedBuffer(responseMessage.getBytes()));
response.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
response.headers().set(HttpHeaders.Names.CONTENT_TYPE, "text/event-stream");
response.headers().set(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
response.headers().set("Cache-Control", "no-cache");
ctx.writeAndFlush(response);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
};
}.start();
}
public static void main(String[] args) {
new NettyHttpServer().start();
}
}
Client JS code (I run it more than 5 times from my browser in different tabs, and not all of them receive events):
var source = new EventSource("http://localhost:8081");
source.onmessage = function(event) {
console.log(event.data);
};
source.onerror= function(err){console.log(err); source.close()};
source.onopen = function(event){console.log('open'); console.log(event)}
You need to let the browser know that you are done sending the response, and for that you have three options.
Set a content length
Send it chunked
Close the connection when you are done
You aren't doing any of those. I suspect your browser is still waiting for the full response to each request you send, and is using a new connection for each request in your testing. After 5 requests your browser must be refusing to create new connections.
Another thing I noticed is that you are creating a new thread for each request in your server, and never letting it die. That will cause problems down the line as you try to scale. If you really want that code to run in a different thread then I suggest looking at overloaded methods for adding handlers to the pipeline; those should let you specify a thread pool to run them in.
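As a rough illustration of both points (a chunked, never-ending event-stream response, plus no hand-rolled thread per connection), the relevant part of the handler could look something like the sketch below; it is not a drop-in replacement for the code above, and the helper class and method names are made up:
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.util.CharsetUtil;
import java.util.concurrent.TimeUnit;

final class SseStreamStarter {
    // Call this from channelRead instead of registerToPubSub.
    static void startStream(final ChannelHandlerContext ctx) {
        // 1) Send the headers once, marked as chunked so the browser knows more data will follow.
        HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        response.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
        response.headers().set(HttpHeaderNames.CACHE_CONTROL, "no-cache");
        response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        HttpUtil.setTransferEncodingChunked(response, true);
        ctx.writeAndFlush(response);

        // 2) Push one event per second on the channel's own event loop; no extra thread needed.
        ctx.executor().scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                ctx.writeAndFlush(new DefaultHttpContent(
                        Unpooled.copiedBuffer("data: abcdef\n\n", CharsetUtil.UTF_8)));
            }
        }, 0, 1, TimeUnit.SECONDS);
    }
}
If you do need to run heavy handler code off the I/O threads, the overloaded ChannelPipeline.addLast(EventExecutorGroup, ...) methods (for example with a DefaultEventExecutorGroup) are the place to plug in a separate pool.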
I'm learning Netty and I'm trying to implement a simple counter where all the clients share an Integer: they input a number and the Integer value is incremented by that number.
Here is my code:
Server.java
package nettyincvalue;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import java.util.logging.Level;
import java.util.logging.Logger;
public class Server {
private int port;
private Integer value;
public Server(int port) {
this.port = port;
this.value = 0;
}
public void run(){
EventLoopGroup bossGroup = new NioEventLoopGroup();
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap server = new ServerBootstrap();
server.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(new ServerInit(this.value))
.option(ChannelOption.SO_BACKLOG, 128)
.childOption(ChannelOption.SO_KEEPALIVE, true);
ChannelFuture f = server.bind(port).sync();
f.channel().closeFuture().sync();
} catch (InterruptedException ex) {
Logger.getLogger(Server.class.getName()).log(Level.SEVERE, null, ex);
} finally {
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
}
}
public static void main(String[] args) {
new Server(12345).run();
}
}
ServerInit.java
package nettyincvalue;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
public class ServerInit extends ChannelInitializer<SocketChannel> {
private Integer value;
public ServerInit(Integer value) {
this.value = value;
}
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast(new StringDecoder());
pipeline.addLast(new ServerHandler(this.value));
}
}
ServerHandler.java
package nettyincvalue;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
public class ServerHandler extends ChannelInboundHandlerAdapter {
private Integer value;
public ServerHandler(Integer value) {
this.value = value;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
String s = (String) msg;
try {
Integer i = Integer.parseInt(s.substring(0, s.length() - 1));
this.value += i;
System.out.println("Value its now: " + this.value);
} catch (NumberFormatException n) {
System.out.println("Not a number received");
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
System.err.println(cause.getMessage());
ctx.close();
}
}
Individually it works: when a client sends a number via nc, the counter increments. But it isn't global; when a different client connects, the counter is back at 0.
Your variable doesn't have proper synchronization: in general it would need to be declared volatile (so all threads see updated values), and the blocks that use it would need to be synchronized. There is also a second problem: Integer is immutable, so this.value += i rebinds the handler's own field to a new Integer object, and the value held by Server is never updated, which is why every new connection appears to start from 0. Using a single shared AtomicInteger solves both problems and is simpler and more efficient.
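A minimal sketch of that change: create one AtomicInteger in Server, pass the same instance through ServerInit to every handler, and let the handler update it. Only the handler is shown; the Server and ServerInit changes amount to swapping the field type from Integer to AtomicInteger:
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class ServerHandler extends ChannelInboundHandlerAdapter {
    private final AtomicInteger value; // the same instance is shared by all connections

    public ServerHandler(AtomicInteger value) {
        this.value = value;
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        String s = (String) msg;
        try {
            // trim() strips the trailing newline that the original code removed with substring
            int i = Integer.parseInt(s.trim());
            System.out.println("Value is now: " + value.addAndGet(i));
        } catch (NumberFormatException n) {
            System.out.println("Not a number received");
        }
    }
}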
I am trying to implement a TCP server in Java using NIO.
It simply uses the Selector's select method to get the ready keys, and then processes those keys if they are acceptable, readable, and so on. The server works just fine while I'm using a single thread, but when I try to use more threads to process the keys, the server's responses slow down and it eventually stops responding, say after 4-5 requests.
This is all I'm doing (pseudocode):
Iterator<SelectionKey> keyIterator = selector.selectedKeys().iterator();
while (keyIterator.hasNext()) {
SelectionKey readyKey = keyIterator.next();
if (readyKey.isAcceptable()) {
//A new connection attempt, registering socket channel with selector
} else {
Worker.add( readyKey );
}
Worker is the thread class that performs Input/Output from the channel.
This is the code of my Worker class:
private static List<SelectionKey> keyPool = Collections.synchronizedList(new LinkedList());
public static void add(SelectionKey key) {
synchronized (keyPool) {
keyPool.add(key);
keyPool.notifyAll();
}
}
public void run() {
while ( true ) {
SelectionKey myKey = null;
synchronized (keyPool) {
try {
while (keyPool.isEmpty()) {
keyPool.wait();
}
} catch (InterruptedException ex) {
}
myKey = keyPool.remove(0);
keyPool.notifyAll();
}
if (myKey != null && myKey.isValid() ) {
if (myKey.isReadable()) {
//Performing reading
} else if (myKey.isWritable()) {
//performing writing
myKey.cancel();
}
}
}
My basic idea is to add the key to the keyPool from which various threads can get a key, one at a time.
My BaseServer class itself runs as a thread. It creates 10 Worker threads before the event loop begins. I also tried increasing the priority of the BaseServer thread so that it gets more chances to accept the acceptable keys. Still, it stops responding after approximately 8 requests. Please help me figure out where I am going wrong. Thanks in advance. :)
You aren't removing anything from the selected-key set. You must do that every time around the loop, e.g. by calling keyIterator.remove() after you call next().
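In other words, something like:
Iterator<SelectionKey> keyIterator = selector.selectedKeys().iterator();
while (keyIterator.hasNext()) {
    SelectionKey readyKey = keyIterator.next();
    keyIterator.remove(); // remove it so the key is not processed again on the next select()
    if (readyKey.isAcceptable()) {
        // register the new socket channel with the selector
    } else {
        Worker.add(readyKey);
    }
}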
You need to read the NIO Tutorials.
First of all, you should not really be using wait() and notify() calls anymore since there exist good Standard Java (1.5+) wrapper classes in java.util.concurrent, such as BlockingQueue.
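For example, the synchronized keyPool plus wait()/notifyAll() above could be replaced with a BlockingQueue along these lines (a sketch, not a full rewrite of your Worker):
import java.nio.channels.SelectionKey;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class Worker implements Runnable {
    private static final BlockingQueue<SelectionKey> keyPool = new LinkedBlockingQueue<>();

    public static void add(SelectionKey key) {
        keyPool.add(key); // no explicit locking or notifyAll() needed
    }

    @Override
    public void run() {
        while (true) {
            try {
                SelectionKey myKey = keyPool.take(); // blocks until a key is available
                if (myKey.isValid()) {
                    if (myKey.isReadable()) {
                        // perform the read
                    } else if (myKey.isWritable()) {
                        // perform the write
                        myKey.cancel();
                    }
                }
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}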
Second, it's suggested to do IO in the selecting thread itself, not in the worker threads. The worker threads should just queue up reads/and writes to the selector thread(s).
This page explains it pretty good and even provides working code samples of a simple TCP/IP server: http://rox-xmlrpc.sourceforge.net/niotut/
Sorry, I don't yet have time to look at your specific example.
Try using the xSocket library. It saved me a lot of time reading forums.
Download: http://xsocket.org/
Tutorial: http://xsocket.sourceforge.net/core/tutorial/V2/TutorialCore.htm
Server Code:
import org.xsocket.connection.*;
/**
*
* @author wsserver
*/
public class XServer {
protected static IServer server;
public static void main(String[] args) {
try {
server = new Server(9905, new XServerHandler());
server.start();
} catch (Exception ex) {
System.out.println(ex.getMessage());
}
}
protected static void shutdownServer(){
try{
server.close();
}catch(Exception ex){
System.out.println(ex.getMessage());
}
}
}
Server Handler:
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.util.*;
import org.xsocket.*;
import org.xsocket.connection.*;
public class XServerHandler implements IConnectHandler, IDisconnectHandler, IDataHandler {
private Set<ConnectedClients> sessions = Collections.synchronizedSet(new HashSet<ConnectedClients>());
Charset charset = Charset.forName("ISO-8859-1");
CharsetEncoder encoder = charset.newEncoder();
CharsetDecoder decoder = charset.newDecoder();
ByteBuffer buffer = ByteBuffer.allocate(1024);
@Override
public boolean onConnect(INonBlockingConnection inbc) throws IOException, BufferUnderflowException, MaxReadSizeExceededException {
try {
synchronized (sessions) {
sessions.add(new ConnectedClients(inbc, inbc.getRemoteAddress()));
}
System.out.println("onConnect"+" IP:"+inbc.getRemoteAddress().getHostAddress()+" Port:"+inbc.getRemotePort());
} catch (Exception ex) {
System.out.println("onConnect: " + ex.getMessage());
}
return true;
}
@Override
public boolean onDisconnect(INonBlockingConnection inbc) throws IOException {
try {
synchronized (sessions) {
sessions.remove(inbc);
}
System.out.println("onDisconnect");
} catch (Exception ex) {
System.out.println("onDisconnect: " + ex.getMessage());
}
return true;
}
@Override
public boolean onData(INonBlockingConnection inbc) throws IOException, BufferUnderflowException, ClosedChannelException, MaxReadSizeExceededException {
inbc.read(buffer);
buffer.flip();
String request = decoder.decode(buffer).toString();
System.out.println("request:"+request);
buffer.clear();
return true;
}
}
Connected Clients:
import java.net.InetAddress;
import org.xsocket.connection.INonBlockingConnection;
/**
*
* @author wsserver
*/
public class ConnectedClients {
private INonBlockingConnection inbc;
private InetAddress address;
//CONSTRUCTOR
public ConnectedClients(INonBlockingConnection inbc, InetAddress address) {
this.inbc = inbc;
this.address = address;
}
//GETERS AND SETTERS
public INonBlockingConnection getInbc() {
return inbc;
}
public void setInbc(INonBlockingConnection inbc) {
this.inbc = inbc;
}
public InetAddress getAddress() {
return address;
}
public void setAddress(InetAddress address) {
this.address = address;
}
}
Client Code:
import java.net.InetAddress;
import org.xsocket.connection.INonBlockingConnection;
import org.xsocket.connection.NonBlockingConnection;
/**
*
* @author wsserver
*/
public class XClient {
protected static INonBlockingConnection inbc;
public static void main(String[] args) {
try {
inbc = new NonBlockingConnection(InetAddress.getByName("localhost"), 9905, new XClientHandler());
while(true){
}
} catch (Exception ex) {
System.out.println(ex.getMessage());
}
}
}
Client Handler:
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import org.xsocket.MaxReadSizeExceededException;
import org.xsocket.connection.IConnectExceptionHandler;
import org.xsocket.connection.IConnectHandler;
import org.xsocket.connection.IDataHandler;
import org.xsocket.connection.IDisconnectHandler;
import org.xsocket.connection.INonBlockingConnection;
/**
*
* @author wsserver
*/
public class XClientHandler implements IConnectHandler, IDataHandler,IDisconnectHandler, IConnectExceptionHandler {
Charset charset = Charset.forName("ISO-8859-1");
CharsetEncoder encoder = charset.newEncoder();
CharsetDecoder decoder = charset.newDecoder();
ByteBuffer buffer = ByteBuffer.allocate(1024);
@Override
public boolean onConnect(INonBlockingConnection nbc) throws IOException {
System.out.println("Connected to server");
nbc.write("hello server\r\n");
return true;
}
@Override
public boolean onConnectException(INonBlockingConnection nbc, IOException ioe) throws IOException {
System.out.println("On connect exception:"+ioe.getMessage());
return true;
}
@Override
public boolean onDisconnect(INonBlockingConnection nbc) throws IOException {
System.out.println("Dissconected from server");
return true;
}
@Override
public boolean onData(INonBlockingConnection inbc) throws IOException, BufferUnderflowException, ClosedChannelException, MaxReadSizeExceededException {
inbc.read(buffer);
buffer.flip();
String request = decoder.decode(buffer).toString();
System.out.println(request);
buffer.clear();
return true;
}
}
I spent a lot of time reading forums about this; I hope my code helps you.