Netty channels randomly closing - Java

My problem is that, at random times when several channels are connected to my game, a disconnection occurs that causes all of the channels to close. The channelDisconnected method is called, and I've printed a stack trace when this happens; everything looks normal. My code is below; what is the problem?
public final class ServerChannelHandler extends SimpleChannelHandler {
private static ChannelGroup channels;
private static ServerBootstrap bootstrap;
public static final void init() {
new ServerChannelHandler();
}
/**
* Gets the number of channels that are currently connected to us
* @return the number of connected channels
*/
public static int getConnectedChannelsSize() {
return channels == null ? 0 : channels.size();
}
/**
* Private constructor; creates the channel group, configures the bootstrap, and binds the server port
*/
private ServerChannelHandler() {
channels = new DefaultChannelGroup();
bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(CoresManager.serverBossChannelExecutor, CoresManager.serverWorkerChannelExecutor, CoresManager.serverWorkersCount));
bootstrap.getPipeline().addLast("handler", this);
bootstrap.setOption("reuseAddress", true); // reuses adress for bind
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.TcpAckFrequency", true);
bootstrap.setOption("child.keepAlive", true);
bootstrap.bind(new InetSocketAddress(Constants.PORT_ID));
}
/**
* What happens when a new channel is open and connects to us
*/
@Override
public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) {
channels.add(e.getChannel());
}
/**
* What happens when an open channel closes the connection with us
*/
@Override
public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) {
channels.remove(e.getChannel());
}
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) {
ctx.setAttachment(new Session(e.getChannel()));
}
@Override
public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) {
Object sessionObject = ctx.getAttachment();
if (sessionObject != null && sessionObject instanceof Session) {
Session session = (Session) sessionObject;
if (session.getDecoder() == null)
return;
if (session.getDecoder() instanceof WorldPacketsDecoder) {
session.getWorldPackets().getPlayer().finish();
}
}
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
if (!(e.getMessage() instanceof ChannelBuffer))
return;
Object sessionObject = ctx.getAttachment();
if (sessionObject != null && sessionObject instanceof Session) {
Session session = (Session) sessionObject;
if (session.getDecoder() == null)
return;
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
buf.markReaderIndex();
int avail = buf.readableBytes();
if (avail < 1 || avail > Constants.RECEIVE_DATA_LIMIT) {
return;
}
byte[] buffer = new byte[avail];
buf.readBytes(buffer);
try {
session.getDecoder().decode(new InputStream(buffer)); // InputStream here is presumably a project-specific class, not java.io.InputStream
} catch (Throwable er) {
er.printStackTrace();
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent ee) throws Exception {
ee.getCause().printStackTrace();
}
/**
* On server shutdown, all channels are closed and the external resources
* are released from the {@link #bootstrap}
*/
public static final void shutdown() {
channels.close().awaitUninterruptibly();
bootstrap.releaseExternalResources();
}
}
StackTrace:
java.lang.Exception: Stack trace
at java.lang.Thread.dumpStack(Thread.java:1364)
at com.sallesy.game.player.Player.realFinish(Player.java:854)
at com.sallesy.game.player.Player.finish(Player.java:848)
at com.sallesy.game.player.Player.finish(Player.java:815)
at com.sallesy.networking.ServerChannelHandler.channelDisconnected(ServerChannelHandler.java:71)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:120)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:558)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:553)
at org.jboss.netty.channel.Channels.fireChannelDisconnected(Channels.java:399)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.close(AbstractNioWorker.java:721)
at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.handleAcceptedSocket(NioServerSocketPipelineSink.java:111)
at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.eventSunk(NioServerSocketPipelineSink.java:66)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendDownstream(DefaultChannelPipeline.java:774)
at org.jboss.netty.channel.SimpleChannelHandler.closeRequested(SimpleChannelHandler.java:338)
at org.jboss.netty.channel.SimpleChannelHandler.handleDownstream(SimpleChannelHandler.java:260)
at org.jboss.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:585)
at org.jboss.netty.channel.DefaultChannelPipeline.sendDownstream(DefaultChannelPipeline.java:576)
at org.jboss.netty.channel.Channels.close(Channels.java:820)
at org.jboss.netty.channel.AbstractChannel.close(AbstractChannel.java:197)
at org.jboss.netty.channel.ChannelFutureListener$1.operationComplete(ChannelFutureListener.java:41)
at org.jboss.netty.channel.DefaultChannelFuture.notifyListener(DefaultChannelFuture.java:428)
at org.jboss.netty.channel.DefaultChannelFuture.addListener(DefaultChannelFuture.java:145)
at com.sallesy.networking.encoders.WorldPacketsEncoder.sendLogout(WorldPacketsEncoder.java:1179)
at com.sallesy.game.player.Player.logout(Player.java:801)
at com.sallesy.networking.decoders.handlers.ButtonHandler.handleButtons(ButtonHandler.java:227)
at com.sallesy.networking.decoders.WorldPacketsDecoder.processPackets(WorldPacketsDecoder.java:1122)
at com.sallesy.networking.decoders.WorldPacketsDecoder.decode(WorldPacketsDecoder.java:297)
at com.sallesy.networking.ServerChannelHandler.messageReceived(ServerChannelHandler.java:100)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:558)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:553)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:84)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.processSelectedKeys(AbstractNioWorker.java:471)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:332)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:35)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:102)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:744)

Bit of a guess really, but it could be keepalive timeouts.
You could try using an IdleStateHandler to keep connections alive.
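For the 3.x API your stack trace shows (org.jboss.netty), a minimal sketch could look like this. The 60/30 second thresholds and the PingPacket class are assumptions; substitute whatever keep-alive message your own protocol defines.
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.handler.timeout.IdleState;
import org.jboss.netty.handler.timeout.IdleStateAwareChannelHandler;
import org.jboss.netty.handler.timeout.IdleStateEvent;

public class KeepAliveHandler extends IdleStateAwareChannelHandler {
    @Override
    public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e) {
        if (e.getState() == IdleState.WRITER_IDLE) {
            // PingPacket is hypothetical: send your protocol's no-op keep-alive here
            ctx.getChannel().write(PingPacket.INSTANCE);
        } else if (e.getState() == IdleState.READER_IDLE) {
            // nothing received for a whole reader-idle period; assume the peer is gone
            ctx.getChannel().close();
        }
    }
}
Then wire it into the pipeline in the ServerChannelHandler constructor, before the business handler (Timer and HashedWheelTimer are in org.jboss.netty.util):
// IdleStateHandler(timer, readerIdleSeconds, writerIdleSeconds, allIdleSeconds)
Timer idleTimer = new HashedWheelTimer();
bootstrap.getPipeline().addLast("idle", new IdleStateHandler(idleTimer, 60, 30, 0));
bootstrap.getPipeline().addLast("keepAlive", new KeepAliveHandler());
bootstrap.getPipeline().addLast("handler", this);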

Related

Apache Kafka Connect - custom websocket connector

The goal is to develop a custom Kafka connector that reads messages from a websocket in a loop. Here is an example of what I've built so far:
I created an interface, IWebsocketClientEndpoint:
public interface IWebsocketClientEndpoint {
IWebsocketClientEndpoint Connect();
void Disconnect() throws IOException;
IWebsocketClientEndpoint addMessageHandler(IMessageHandler msgHandler);
void SendMessage(String message) throws Exception;
void SendMessage(ByteBuffer message) throws Exception;
void SendMessage(Object message) throws Exception;
boolean isOpen();
void Dispose()throws IOException;
}
and a class that implements the above interface:
@ClientEndpoint
public class WebsocketClientEndpoint implements IWebsocketClientEndpoint {
private WebSocketContainer _container;
private Session _userSession = null;
private IMessageHandler _messageHandler;
private URI _endpointURI;
private WebsocketClientEndpoint(URI endpointURI) {
try {
_endpointURI = endpointURI;
_container = ContainerProvider.getWebSocketContainer();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private WebsocketClientEndpoint(URI endpointURI, int bufferSize) {
try {
_endpointURI = endpointURI;
_container = ContainerProvider.getWebSocketContainer();
_container.setDefaultMaxBinaryMessageBufferSize(bufferSize);
_container.setDefaultMaxTextMessageBufferSize(bufferSize);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static IWebsocketClientEndpoint Create(URI endpointURI){
return new WebsocketClientEndpoint(endpointURI);
}
public static IWebsocketClientEndpoint Create(URI endpointURI,int bufferSize){
return new WebsocketClientEndpoint(endpointURI,bufferSize);
}
public IWebsocketClientEndpoint Connect() {
try {
_container.connectToServer(this, _endpointURI);
return this;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@OnOpen
public void onOpen(Session userSession) {
this._userSession = userSession;
if (this._messageHandler != null) {
this._messageHandler.handleOpen("Web socket "+ _endpointURI +" opened");}
}
@OnClose
public void onClose(Session userSession, CloseReason reason) {
this._userSession = null;
if (this._messageHandler != null) {
this._messageHandler.handleClose("Web socket "+ _endpointURI +" closed. Reason: " + reason.getReasonPhrase());}
}
public void Disconnect() throws IOException {
CloseReason reason = new CloseReason(CloseReason.CloseCodes.NORMAL_CLOSURE,"Web socket closed by user");
this._userSession.close(reason);
this._userSession = null;
//close notification to caller
if (this._messageHandler != null) {
this._messageHandler.handleClose("Web socket "+ _endpointURI +" closed. Reason: " + reason.getReasonPhrase());}
}
@Override
public IWebsocketClientEndpoint addMessageHandler(IMessageHandler msgHandler) {
this._messageHandler = msgHandler;
return this;
}
@OnMessage
public void onMessage(String message) {
if (this._messageHandler != null) {
this._messageHandler.handleMessage(message);
}
}
@OnMessage
public void onMessage(ByteBuffer bytes) {
if (this._messageHandler != null) {
this._messageHandler.handleMessage(bytes);
}
}
public void SendMessage(String message) throws Exception {
try{
this._userSession.getAsyncRemote().sendText(message);
}catch (Exception ex){
throw ex;
}
}
public void SendMessage(ByteBuffer message) throws Exception {
try{
this._userSession.getAsyncRemote().sendBinary(message);
}catch (Exception ex){
throw ex;
}
}
public void SendMessage(Object message) throws Exception {
try{
this._userSession.getAsyncRemote().sendObject(message);
}catch (Exception ex){
throw ex;
}
}
@Override
public boolean isOpen() {
if (this._userSession != null){
return this._userSession.isOpen();
}
return false;
}
}
The WebsocketClientEndpoint class is dedicated to creating the websocket and managing connection, disconnection, and sending and receiving messages.
The question is: how can I adapt my websocket structure to the Kafka Connect structure? I could queue the messages received ("public void handleMessage(String s)") from the socket in a ConcurrentLinkedQueue and then, in the Kafka Connect loop method, dequeue them. But is that the best solution?
Below, the implementation of my Kafka custom connector
My kafka Connector
public class MySourceTask extends SourceTask {
IWebsocketClientEndpoint _clientEndPoint;
@Override
public void start(Map<String, String> props) {
_clientEndPoint = WebsocketClientEndpoint
.Create(new URI(socket))
.Connect();
_clientEndPoint.addMessageHandler(new IMessageHandler() {
@Override
public void handleMessage(String s) {
}
@Override
public void handleMessage(ByteBuffer byteBuffer) {
}
@Override
public void handleClose(String s) {
}
@Override
public void handleOpen(String s) {
}
});
}
@Override
public List<SourceRecord> poll() throws InterruptedException {
return null;
}
@Override
public void stop() {
_clientEndPoint.Dispose();
}
}
Thanks in advance to anyone
I'd suggest adding the interface to the class
extends SourceTask implements IMessageHandler
Then
_clientEndPoint.addMessageHandler(this);
And when you implement handleMessage, add those strings to some queue. Inside the poll method, you would pop data off that queue to create SourceRecord objects to return.
Inside of stop, call this.handleClose and clean up other resources.
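A rough sketch of that suggestion. The websocket.url config key, the websocket-topic name, and the version string are assumptions; the null partition/offset maps in SourceRecord should be filled in if you need offset tracking:
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class MySourceTask extends SourceTask implements IMessageHandler {
    private final Queue<String> queue = new ConcurrentLinkedQueue<>();
    private IWebsocketClientEndpoint clientEndPoint;

    @Override
    public void start(Map<String, String> props) {
        try {
            clientEndPoint = WebsocketClientEndpoint
                    .Create(new URI(props.get("websocket.url"))) // hypothetical config key
                    .Connect()
                    .addMessageHandler(this);
        } catch (URISyntaxException e) {
            throw new ConnectException("Invalid websocket URI", e);
        }
    }

    @Override
    public void handleMessage(String s) {
        queue.add(s); // runs on the websocket thread; buffered until poll() drains it
    }

    @Override
    public void handleMessage(ByteBuffer byteBuffer) { }

    @Override
    public void handleOpen(String s) { }

    @Override
    public void handleClose(String s) { }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        List<SourceRecord> records = new ArrayList<>();
        String msg;
        while ((msg = queue.poll()) != null) {
            // null partition/offset maps and the topic name are placeholders
            records.add(new SourceRecord(null, null, "websocket-topic",
                    Schema.STRING_SCHEMA, msg));
        }
        return records.isEmpty() ? null : records;
    }

    @Override
    public void stop() {
        try {
            clientEndPoint.Dispose();
        } catch (IOException e) {
            // nothing more to do at shutdown
        }
    }

    @Override
    public String version() {
        return "1.0";
    }
}
Returning null from poll() tells the Connect framework there is no data available right now; it will simply call poll() again.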

Why file transfer is very, very slow on Netty (4.1.5-Final)

In my socket-based server implementation using Netty 4.1.5-Final, when I transfer a video/image file in chunks (20 KB chunk size), I see a gap of around 350+ ms between two chunks, and I'm not sure how to reduce it.
Here is my main server code :
public class MultimediaServer extends Thread implements IMultimediaServer, BeanFactoryAware {
/**
* Logger Instance
*/
protected Logger logger = Logger.getLogger(this.getClass());
@Autowired
private Properties props;
private RequestHandler requestHandler;
private BeanFactory beanFactory;
private int port;
private int maxConnection;
private int timeout = 30000;
private EventLoopGroup bossGroup = null;
private EventLoopGroup workerGroup = null;
@Override
public void run() {
try {
bossGroup = new NioEventLoopGroup();
workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap serverBootstrap = new ServerBootstrap();
serverBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, Const.PACKET_HEADER_LENGTH, 0, Const.PACKET_HEADER_LENGTH));
ch.pipeline().addLast("messageDecoder", new MessageDecoder());
ch.pipeline().addLast("frameEncoder", new ResponseHandler(Const.PACKET_HEADER_LENGTH));
ch.pipeline().addLast("bytesEncoder", new ByteArrayEncoder());
ch.pipeline().addLast(getHandler());
}
}).option(ChannelOption.SO_BACKLOG, maxConnection)
.option(ChannelOption.SO_KEEPALIVE, true)
.option(ChannelOption.TCP_NODELAY, true)
.option(ChannelOption.SO_REUSEADDR, true)
.option(ChannelOption.MAX_MESSAGES_PER_READ, Integer.MAX_VALUE)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeout)
.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 32 * 1024)
.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 128 * 1024)
.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
.childOption(ChannelOption.SO_KEEPALIVE, true)
.childOption(ChannelOption.TCP_NODELAY, true)
.childOption(ChannelOption.SO_REUSEADDR, true)
.childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeout)
.childOption(ChannelOption.MAX_MESSAGES_PER_READ, Integer.MAX_VALUE)
.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 32 * 1024)
.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 128 * 1024);
// Bind and start to accept incoming connections.
ChannelFuture f = serverBootstrap.bind(this.port).sync();
// Wait until the server socket is closed.
// In this example, this does not happen, but you can do that to
// gracefully shut down your server.
f.channel().closeFuture().sync();
} finally {
workerGroup.shutdownGracefully();
bossGroup.shutdownGracefully();
}
} catch (Throwable e) {
logger.error("ERROR : While starting the Konvx service ", e);
}
}
@Override
public void startServer(int port) {
super.setName("KonvxMultimediaServer : " + port);
this.port = port;
this.start();
}
@Override
public void stopServer() {
workerGroup.shutdownGracefully();
bossGroup.shutdownGracefully();
}
public RequestHandler getRequestHandler() {
return requestHandler;
}
public void setRequestHandler(RequestHandler requestHandler) {
this.requestHandler = requestHandler;
}
/**
* Return Request Handler
* @return RequestHandler
*/
private RequestHandler getHandler() {
return (RequestHandler) beanFactory.getBean("requestHandler", RequestHandler.class);
}
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
}
#Value("${konvx.maxConnection}")
public void setMaxConnection(String maxConnection) {
this.maxConnection = Integer.parseInt(maxConnection);
}
#Value("${konvx.socket.timeout}")
public void setTimeout(String timeout) {
this.timeout = Integer.parseInt(timeout);
}
}
Here is the channel handler
public class RequestHandler extends SimpleChannelInboundHandler<KonvxMessage> {
/**
* Logger Instance
*/
private Logger logger = Logger.getLogger(this.getClass());
#Autowired
private Router router;
#Autowired
protected UserPool userPool;
@Override
public void channelRead0(ChannelHandlerContext ctx, KonvxMessage message) throws Exception {
Packet packet = new Packet();
packet.setCtx(ctx);
try {
if (message == null) {
logger.warn("Warning - message is empty");
return;
}
// Reject the in-bound message if it does not contain the magic cookie
if (!message.hasCookie()) {
logger.error("ERROR: Bad Cookie :" + message);
return;
}
// Checking if user is a valid/registered to our application
if (!userPool.isValidUser(message.getUserId())) {
packet.writeMessage(KonvxMessageFactory.getInvalidUserMessage(message));
return;
}
packet.setInMessage(message);
router.route(packet);
} catch (Exception e) {
logger.error("ERROR : Whie receiving/processing the in-bound message ", e);
packet.writeMessage(KonvxMessageFactory.getErrorMessage(message, KonvxError.UNKNOWN_ERROR));
}
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
logger.warn("WARN : Connection problem - " + cause.getMessage() + " Client address :" + ctx.channel().remoteAddress());
ctx.close();
return;
}
}
Here is the decoder for the packet -
public class MessageDecoder extends ByteToMessageDecoder {
/**
* Logger Instance
*/
protected Logger logger = Logger.getLogger(this.getClass());
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
// Parsing the object
String msg = null;
try {
byte[] bytes = new byte[in.readableBytes()];
if (bytes.length <= 0) {
logger.debug("Total readable bytes :" + in.readableBytes() + " exiting...");
return;
}
in.readBytes(bytes);
msg = new String(bytes, CharsetUtil.UTF_8);
// Return if message is empty
if (msg.isEmpty()) {
logger.warn("Message is empty...exiting...");
return;
}
KonvxMessage konvxMessage = JsonUtil.parseMessage(msg);
// Logging the incoming message
StringBuilder logMessage = new StringBuilder();
logMessage.append("Incoming message :").append(System.lineSeparator())
.append(konvxMessage)
.append(System.lineSeparator());
logger.info(logMessage.toString());
out.add(konvxMessage);
} catch (Throwable e) {
logger.error("ERROR : While receiving/parsing/decoding the message " + msg, e);
new Packet(ctx).writeMessage(KonvxMessageFactory.getParseFailedErrorMessage(msg));
}
}
}
Please help: how can I fine-tune Netty to improve file transfer performance over a socket between a mobile device and my Java server?

Getting response from Netty server in non-Netty client

This is my first question on StackOverflow and I hope I have adhered to the expected standards.
I have been taking over some code from someone else who isn't working here anymore and I'm pretty much stranded here. I searched and asked some colleagues (not too much Java experience unfortunately) but no-one seems to be able to help me. Searching didn't really help me either.
I'm sending JSON requests to a Netty server from a client which intentionally is NOT implemented using Netty. For now it is just a simple Java socket, but the intention is to have a Flask client send requests to the Netty server. The requests arrive (both via Java sockets and via Python Flask) and get properly processed in the pipeline, but I also want to send a response back to the client. Although I suspect I know where in the code the response should be sent, I'm clearly missing something, as I never receive one. Any suggestions?
Here is the Java socket client (the json1 and json2 strings have been omitted from the snippet as they are rather long, but they are properly formatted). It posts requests using a Socket and its output stream. The response part (reading the input stream of the same socket) is just a test that I have my doubts about, but I'm not sure how to do it otherwise, which is why I kept it here. I've seen plenty of examples with clients implementing Netty interfaces, and that seems to work fine, but as said, I want a client that does not use Netty to be able to receive the responses as well (if that's possible at all).
String serverResponse;
for (int j = 0; j < 100; j++) {
for (int i = 0; i < 1000; i++) {
try {
Socket s = new Socket("localhost", 12000);
PrintWriter out = new PrintWriter(s.getOutputStream(), true);
out.write(json1 + i + json2);
out.flush();
// Testing only - trying to get the response back from the server
BufferedReader in = new BufferedReader(new InputStreamReader(s.getInputStream()));
while(true) {
if ((serverResponse = in.readLine()) != null) {
log.info("server says", serverResponse);
break;
}
}
out.close();
s.close();
Thread.sleep(1000);
} catch (Exception ex) {
ex.printStackTrace();
}
}
Thread.sleep(2000);
}
MFTcpServer.java
/**
* Abstract TCP server class. Subclasses implement this class to provide an actual server.
*
* @param <R> the data to be read from the socket.
* @param <W> the data to be written (in case of duplex) to the socket.
*/
public abstract class MFTcpServer<R, W> {
protected final AtomicBoolean started;
protected MFTcpServer() {
this.started = new AtomicBoolean();
}
/**
* Start the server.
*
* @param initializer the channel initializers. they will be called when a new client connects to the server.
* @return instance of tcp server
*/
public final MFTcpServer<R, W> start(ChannelInitializer<Channel> initializer) {
if (!started.compareAndSet(false, true)) {
throw new IllegalStateException("Server already started");
}
doStart(initializer);
return this;
}
/**
* Start the server and wait for all the threads to be finished before shutdown.
* @param initializer the channel initializers. they will be called when a new client connects to the server.
*/
public final void startAndAwait(ChannelInitializer<Channel> initializer) {
start(initializer);
awaitShutdown();
}
/**
* Shutdown the server
* @return true if successfully shutdown.
*/
public final boolean shutdown() {
return !started.compareAndSet(true, false) || doShutdown();
}
/**
* Wait for all the threads to be finished before shutdown.
*/
public abstract void awaitShutdown();
/**
* Do the shutdown now.
* @return true if successfully shutdown
*/
public abstract boolean doShutdown();
/**
* start the server
* @param initializer the channel initializers. they will be called when a new client connects to the server.
* @return instance of tcp server
*/
public abstract MFTcpServer<R, W> doStart(ChannelInitializer<Channel> initializer);
/**
*
* @return the port where the server is running.
*/
public abstract int getPort();
}
MFNetty4TcpServer.java, the actual server implementation:
public class MFNetty4TcpServer<R, W> extends MFTcpServer<R, W> {
private static final Logger logger = LoggerFactory.getLogger(MFNetty4TcpServer.class);
private static final int BOSS_THREAD_POOL_SIZE = 2;
private int port;
private ServerBootstrap bootstrap;
private ChannelFuture bindFuture;
/**
* The constructor.
*
* @param port port where to listen
*/
protected MFNetty4TcpServer(int port) {
this.port = port;
final NioEventLoopGroup bossGroup = new NioEventLoopGroup(0, new DefaultEventExecutorGroup
(BOSS_THREAD_POOL_SIZE));
final NioEventLoopGroup workerGroup = new NioEventLoopGroup(0, new DefaultEventExecutorGroup
(JsonProducerConfig.THREAD_POOL_SIZE));
bootstrap = new ServerBootstrap()
.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class);
}
@Override
public MFNetty4TcpServer<R, W> doStart(ChannelInitializer<Channel> initializer) {
bootstrap.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
if (initializer != null) {
ch.pipeline().addLast(initializer);
}
}
});
try {
bindFuture = bootstrap.bind(port).sync();
if (!bindFuture.isSuccess()) {
// Connection not successful
throw new RuntimeException(bindFuture.cause());
}
SocketAddress localAddress = bindFuture.channel().localAddress();
if (localAddress instanceof InetSocketAddress) {
port = ((InetSocketAddress) localAddress).getPort();
logger.info("Started server at port: " + port);
}
} catch (InterruptedException e) {
logger.error("Error waiting for binding server port: " + port, e);
}
return this;
}
@Override
public void awaitShutdown() {
try {
bindFuture.channel().closeFuture().await();
} catch (InterruptedException e) {
Thread.interrupted(); // Reset the interrupted status
logger.error("Interrupted while waiting for the server socket to close.", e);
}
}
@Override
public boolean doShutdown() {
try {
bindFuture.channel().close().sync();
return true;
} catch (InterruptedException e) {
logger.error("Failed to shutdown the server.", e);
return false;
}
}
@Override
public int getPort() {
return port;
}
/**
* Creates a tcp server at the defined port.
*
* @param port port to listen to
* @param <R> data to be read
* @param <W> data to be written back. Only in case of duplex connection.
* @return instance of tcp server.
*/
public static <R, W> MFTcpServer<R, W> create(int port) {
return new MFNetty4TcpServer<>(port);
}
}
JsonProducerConfig.java The pipeline is set up here:
/**
* Spring Configuration class of the application.
*/
@Configuration
@Import({DatabusConfig.class})
public class JsonProducerConfig {
private static final Logger log = LoggerFactory.getLogger(JsonProducerConfig.class);
public static final int THREAD_POOL_SIZE = Runtime.getRuntime().availableProcessors() * 2;
public static final String TCP_SERVER = "tcpServer";
public static final String CHANNEL_PIPELINE_INITIALIZER = "channel_initializer";
public static final String MF_KAFKA_PRODUCER = "mf_kafka_producer";
public static final String JSON_AVRO_CONVERTOR = "jsonAvroConvertor";
#Value("#{systemProperties['tcpserver.port']?:'12000'}")
private String tcpServerPort;
@Bean(name = TCP_SERVER)
@Scope(ConfigurableBeanFactory.SCOPE_SINGLETON)
public MFTcpServer nettyTCPServer() {
return MFNetty4TcpServer.create(Integer.parseInt(tcpServerPort));
}
@Bean(name = MF_KAFKA_PRODUCER)
@Scope(ConfigurableBeanFactory.SCOPE_SINGLETON)
public MFKafkaProducer pushToKafka() {
return new MFKafkaProducer();
}
@Bean(name = JSON_AVRO_CONVERTOR)
@Scope(ConfigurableBeanFactory.SCOPE_SINGLETON)
public JsonAvroConvertor jsonAvroConvertor() {
return new JsonAvroConvertor();
}
/**
* This is where the pipeline is set for processing of events.
*
* @param jsonAvroConvertor converts json to avro
* @param kafkaProducer pushes to kafka
* @return channel initializer pipeline.
*/
@Bean(name = CHANNEL_PIPELINE_INITIALIZER)
@Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
public ChannelInitializer<Channel> channelInitializers(JsonAvroConvertor jsonAvroConvertor,
MFKafkaProducer kafkaProducer) {
return new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel channel) throws Exception {
if (log.isInfoEnabled())
log.info("initChannel - initing channel...");
channel.pipeline().addLast(new NioEventLoopGroup(0, new DefaultEventExecutorGroup(THREAD_POOL_SIZE)));
channel.pipeline().addLast(new JsonObjectDecoder(1048576));
channel.pipeline().addLast(jsonAvroConvertor);
channel.pipeline().addLast(kafkaProducer);
if (log.isInfoEnabled())
log.info("channel = " + channel.toString());
}
};
}
}
JsonProducer.java The main program
public class JsonProducer {
private static final Logger log = LoggerFactory.getLogger(JsonProducer.class);
private static MFTcpServer tcpServer;
/**
* Main startup method
*
* @param args not used
*/
public static void main(String[] args) {
System.setProperty("solschema", "false");
try {
// the shutdown hook.
Runtime.getRuntime().addShutdownHook(new Thread(
() -> {
if (tcpServer != null) {
tcpServer.shutdown();
}
}
));
AnnotationConfigApplicationContext context = new
AnnotationConfigApplicationContext(JsonProducerConfig.class);
tcpServer = (MFTcpServer) context.getBean(JsonProducerConfig.TCP_SERVER);
ChannelInitializer<Channel> channelInitializer = (ChannelInitializer<Channel>) context.
getBean(JsonProducerConfig.CHANNEL_PIPELINE_INITIALIZER);
tcpServer.startAndAwait(channelInitializer);
} catch (Exception t) {
log.error("Error while starting JsonProducer ", t);
System.exit(-1);
}
}
}
MFKafkaProducer.java, the last handler in the pipeline. Note the ctx.writeAndFlush(msg) in the channelRead method, which is where I understand the response should be initiated. But what comes after that? When running this, channelFuture.isSuccess() evaluates to false. The response object was an attempt at a String response.
@ChannelHandler.Sharable
public class MFKafkaProducer extends ChannelInboundHandlerAdapter {
private static final Logger log = LoggerFactory.getLogger(MFKafkaProducer.class);
@Resource
ApplicationContext context;
@Resource(name = DatabusConfig.ADMIN)
Admin admin;
private Map<String, IProducer> streams = new HashMap<>();
@PreDestroy
public void stop() {
removeAllStreams(); // then stop writing to producers
}
/**
* @param clickRecord the record to be pushed to kafka
* @throws Exception
*/
public void handle(GenericRecord clickRecord) throws Exception {
Utf8 clientId = null;
try {
clientId = (Utf8) clickRecord.get(SchemaUtil.APP_ID);
stream(producer(clientId.toString()), clickRecord);
} catch (Exception e) {
String message = "Could not push click data for clientId:" + clientId;
log.warn("handle - " + message + "!!!", e);
assert clientId != null;
removeStream(clientId.toString());
}
}
/**
* removes all the streams
*/
private void removeAllStreams() {
Set<String> strings = streams.keySet();
for (String clientId : strings) {
removeStream(clientId);
}
}
/**
* removes a particular stream
*
* @param clientId the stream to be removed
*/
private void removeStream(String clientId) {
Assert.notEmpty(streams);
IProducer producer = streams.get(clientId);
producer.stopProducer();
streams.remove(clientId);
}
/**
* @param producer the producer where data needs to be written
* @param clickRecord the record to be written
*/
private void stream(IProducer producer, GenericRecord clickRecord) {
producer.send(clickRecord);
}
/**
* This will create a producer in case it is not already created.
* If already created return the already present one
*
* @param clientId stream id
* @return the producer instance
*/
private IProducer producer(String clientId) {
if (streams.containsKey(clientId)) {
return streams.get(clientId);
} else {
IProducer producer = admin.createKeyTopicProducer(SchemaUtil.APP_ID, "test_" + clientId, new ICallback() {
@Override
public void onSuccess(long offset) {
if (log.isInfoEnabled())
log.info("onSuccess - Data at offset:" + offset + " send.");
}
@Override
public void onError(long offset, Exception ex) {
if (log.isInfoEnabled())
log.info("onError - Data at offset:" + offset + " failed. Exception: ", ex);
}
@Override
public void onStreamClosed() {
log.warn("onStreamClosed - Stream:" + clientId + " closed.");
removeStream(clientId);
}
});
producer.startProducer();
streams.put(clientId, producer);
return producer;
}
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
log.debug("KafkaProducer - channelRead() called with " + "ctx = [" + ctx + "], msg = [" + msg + "]");
if (msg instanceof GenericRecord) {
GenericRecord genericRecord = (GenericRecord) msg;
try {
handle(genericRecord);
log.debug("channelRead sending response");
Charset charset = Charset.defaultCharset();
ByteBuf response = Unpooled.copiedBuffer("Just a response", charset);
ChannelFuture future = ctx.writeAndFlush(msg);
future.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture channelFuture) throws Exception {
if (channelFuture.isSuccess())
log.info("channelRead - future.operationComplete - Response has been delivered to all channels");
else
log.info("channelRead - future.operationComplete - Response has NOT been delivered to all channels");
}
});
} catch (Exception ex) {
log.error("Something went wrong processing the generic record: " + msg + "\n ", ex);
}
} else {
log.debug("KafkaProducer - msg not of Type Generic Record !!! " + msg);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
log.error("Something went wrong writing to Kafka: \n", cause);
ctx.close();
}
}
Using ChannelFuture#cause() I noticed I was not writing a ByteBuf object, but a GenericRecord instead. Using
ByteBuf response = Unpooled.copiedBuffer(genericRecord.toString(), charset);
ChannelFuture future = ctx.writeAndFlush(response);
the GenericRecord gets converted to a ByteBuf and the response is sent using the writeAndFlush method.
The test client using a Socket implementation somehow never really received a response, but by using a SocketChannel this was resolved as well.
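For completeness, a sketch of the corrected write path inside channelRead. The trailing newline is my own addition (not in the original code) so that a plain-socket client blocking in BufferedReader.readLine() actually receives a complete line:
// The pipeline registers no outbound encoder, so only a ByteBuf can be written out.
Charset charset = Charset.defaultCharset();
ByteBuf response = Unpooled.copiedBuffer(genericRecord.toString() + "\n", charset);
ctx.writeAndFlush(response).addListener((ChannelFutureListener) channelFuture -> {
    if (!channelFuture.isSuccess()) {
        log.warn("Response write failed", channelFuture.cause());
    }
});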

Java, Netty, TCP and UDP connection integration: No buffer space available for UDP connection

I have an application which uses both the TCP and UDP protocols. The main assumption is that the client connects to the server via TCP, and when the connection is established, UDP datagrams are sent.
I have to support two scenarios of connecting to server:
- client connects when server is running
- client connects when server is down and retries connection until server starts again
For the first scenario everything works fine: both connections work.
The problem is with the second scenario. When the client tries a few times to connect via TCP and finally connects, the UDP connection function throws an exception:
java.net.SocketException: No buffer space available (maximum connections reached?): bind
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:344)
at sun.nio.ch.DatagramChannelImpl.bind(DatagramChannelImpl.java:684)
at sun.nio.ch.DatagramSocketAdaptor.bind(DatagramSocketAdaptor.java:91)
at io.netty.channel.socket.nio.NioDatagramChannel.doBind(NioDatagramChannel.java:192)
at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:484)
at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1080)
at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:430)
at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:415)
at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:903)
at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:197)
at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:350)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:380)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
at java.lang.Thread.run(Thread.java:722)
When I restart the client application without doing anything to the server, the client connects without any problems.
What can cause this problem?
Below I attach the source code of the classes. All of it comes from the examples on the official Netty project page. The only thing I have modified is that I replaced static variables and functions with non-static ones, because in the future I will need many TCP-UDP connections to multiple servers.
public final class UptimeClient {
static final String HOST = System.getProperty("host", "192.168.2.193");
static final int PORT = Integer.parseInt(System.getProperty("port", "2011"));
static final int RECONNECT_DELAY = Integer.parseInt(System.getProperty("reconnectDelay", "5"));
static final int READ_TIMEOUT = Integer.parseInt(System.getProperty("readTimeout", "10"));
private static UptimeClientHandler handler;
public void runClient() throws Exception {
configureBootstrap(new Bootstrap()).connect();
}
private Bootstrap configureBootstrap(Bootstrap b) {
return configureBootstrap(b, new NioEventLoopGroup());
}
@Override
protected Object clone() throws CloneNotSupportedException {
return super.clone(); //To change body of generated methods, choose Tools | Templates.
}
Bootstrap configureBootstrap(Bootstrap b, EventLoopGroup g) {
if(handler == null){
handler = new UptimeClientHandler(this);
}
b.group(g)
.channel(NioSocketChannel.class)
.remoteAddress(HOST, PORT)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new IdleStateHandler(READ_TIMEOUT, 0, 0), handler);
}
});
return b;
}
void connect(Bootstrap b) {
b.connect().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.cause() != null) {
handler.startTime = -1;
handler.println("Failed to connect: " + future.cause());
}
}
});
}
}
@Sharable
public class UptimeClientHandler extends SimpleChannelInboundHandler<Object> {
UptimeClient client;
public UptimeClientHandler(UptimeClient client){
this.client = client;
}
long startTime = -1;
@Override
public void channelActive(ChannelHandlerContext ctx) {
try {
if (startTime < 0) {
startTime = System.currentTimeMillis();
}
println("Connected to: " + ctx.channel().remoteAddress());
new QuoteOfTheMomentClient(null).run();
} catch (Exception ex) {
Logger.getLogger(UptimeClientHandler.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (!(evt instanceof IdleStateEvent)) {
return;
}
IdleStateEvent e = (IdleStateEvent) evt;
if (e.state() == IdleState.READER_IDLE) {
// The connection was OK but there was no traffic for last period.
println("Disconnecting due to no inbound traffic");
ctx.close();
}
}
@Override
public void channelInactive(final ChannelHandlerContext ctx) {
println("Disconnected from: " + ctx.channel().remoteAddress());
}
@Override
public void channelUnregistered(final ChannelHandlerContext ctx) throws Exception {
println("Sleeping for: " + UptimeClient.RECONNECT_DELAY + 's');
final EventLoop loop = ctx.channel().eventLoop();
loop.schedule(new Runnable() {
@Override
public void run() {
println("Reconnecting to: " + UptimeClient.HOST + ':' + UptimeClient.PORT);
client.connect(client.configureBootstrap(new Bootstrap(), loop));
}
}, UptimeClient.RECONNECT_DELAY, TimeUnit.SECONDS);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
void println(String msg) {
if (startTime < 0) {
System.err.format("[SERVER IS DOWN] %s%n", msg);
} else {
System.err.format("[UPTIME: %5ds] %s%n", (System.currentTimeMillis() - startTime) / 1000, msg);
}
}
}
public final class QuoteOfTheMomentClient {
private ServerData config;
public QuoteOfTheMomentClient(ServerData config){
this.config = config;
}
public void run() throws Exception {
EventLoopGroup group = new NioEventLoopGroup();
try {
Bootstrap b = new Bootstrap();
b.group(group)
.channel(NioDatagramChannel.class)
.option(ChannelOption.SO_BROADCAST, true)
.handler(new QuoteOfTheMomentClientHandler());
Channel ch = b.bind(0).sync().channel();
ch.writeAndFlush(new DatagramPacket(
Unpooled.copiedBuffer("QOTM?", CharsetUtil.UTF_8),
new InetSocketAddress("192.168.2.193", 8193))).sync();
if (!ch.closeFuture().await(5000)) {
System.err.println("QOTM request timed out.");
}
}
catch(Exception ex)
{
ex.printStackTrace();
}
finally {
group.shutdownGracefully();
}
}
}
public class QuoteOfTheMomentClientHandler extends SimpleChannelInboundHandler<DatagramPacket> {
@Override
public void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) throws Exception {
String response = msg.content().toString(CharsetUtil.UTF_8);
if (response.startsWith("QOTM: ")) {
System.out.println("Quote of the Moment: " + response.substring(6));
ctx.close();
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
}
If your server is Windows Server 2008 (R2 or R2 SP1), this problem is likely described and solved by this stackoverflow answer which refers to Microsoft KB article #2577795
This issue occurs because of a race condition in the Ancillary Function Driver
for WinSock (Afd.sys) that causes sockets to be leaked. With time, the issue
that is described in the "Symptoms" section occurs if all available socket
resources are exhausted.
If your server is Windows Server 2003, this problem is likely described and solved by this stackoverflow answer which refers to Microsoft KB article #196271
The default maximum number of ephemeral TCP ports is 5000 in the products that
are included in the "Applies to" section. A new parameter has been added in
these products. To increase the maximum number of ephemeral ports, follow these
steps...
...which basically means that you have run out of ephemeral ports.
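If the Windows Server 2003 case applies, the parameter the KB article adds is MaxUserPort. A sketch of the registry change (verify the exact steps against the KB article before applying, and reboot afterwards):
Windows Registry Editor Version 5.00

[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters]
; raise the ephemeral port ceiling from the default 5000 toward 65534
"MaxUserPort"=dword:0000fffe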

Netty not closing channels normally

I'm new to Netty. I'm migrating my server to it because of high load. I've read lots of manuals and done everything as in the examples, but I have a bug: CPU load grows over time, and very fast.
Analyzing the heap and logs shows me that channels which throw exceptions (and maybe not only those) are not closed normally and remain in the workers' selectors. I have 150+ open channels with 50 users online.
My code is follows.
Creating server:
networkServer = new ServerBootstrap(new NioServerSocketChannelFactory(bossExec, ioExec, 4));
networkServer.setOption("backlog", 500);
networkServer.setOption("connectTimeoutMillis", 10000);
networkServer.setPipelineFactory(new ServerPipelineFactory());
Channel channel = networkServer.bind(new InetSocketAddress(address, port));
Pipeline factory:
@Override
public ChannelPipeline getPipeline() throws Exception {
PacketFrameDecoder decoder = new PacketFrameDecoder();
PacketFrameEncoder encoder = new PacketFrameEncoder();
return Channels.pipeline(decoder, encoder, new PlayerHandler(decoder, encoder));
}
Decoder:
public class PacketFrameDecoder extends FrameDecoder {
@Override
public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
}
@Override
public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
}
@Override
protected Object decode(ChannelHandlerContext arg0, Channel arg1, ChannelBuffer buffer) throws Exception {
try {
buffer.markReaderIndex();
Packet p = // ... doing decoding things
if(p != null)
return p;
// Reset buffer if not success
buffer.resetReaderIndex();
// Reset buffer if not success and got exception of not enough bytes in buffer
} catch(BufferUnderflowException e) {
buffer.resetReaderIndex();
} catch(ArrayIndexOutOfBoundsException aioobe) {
buffer.resetReaderIndex();
}
return null;
}
}
And handler:
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
worker = new PlayerWorkerThread(this, e.getChannel());
}
@Override
public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
worker.disconnectedFromChannel();
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
if(e.getChannel().isOpen())
worker.acceptPacket((Packet) e.getMessage());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
Server.logger.log(Level.WARNING, "Exception from downstream", e.getCause());
ctx.getChannel().close();
}
Everything is working very well, except for the growing CPU load...
