Message sending with Netty 4.1 (on OpenJDK 1.6.0_32 and CentOS 6.4) is very slow. According to the profiler, DefaultChannelHandlerContext.writeAndFlush accounts for the largest share of the run time (60%), while decoding is not highlighted at all. The messages are small, so perhaps the bootstrap options are not set correctly (TCP_NODELAY is true, with no improvement)? A DefaultEventExecutorGroup is used on both the server and the client to avoid blocking Netty's main event loop; the classes with the business logic, DataServer and DataClient, run there, and messages are sent from there via context.writeAndFlush(...). Is there a more appropriate/faster way? Serializing directly with ByteBuf.writeBytes(..) in the encoder and using ReplayingDecoder in the decoder made no difference in speed. Sorry for the lengthy code; neither the 'Netty in Action' book nor the documentation helped.
JProfiler call tree from the client: http://i62.tinypic.com/dw4e43.jpg
The server class:
public class NettyServer
{
    EventLoopGroup incomingLoopGroup = null;
    EventLoopGroup workerLoopGroup = null;
    ServerBootstrap serverBootstrap = null;
    int port;
    DataServer dataServer = null;
    DefaultEventExecutorGroup dataEventExecutorGroup = null;
    DefaultEventExecutorGroup dataEventExecutorGroup2 = null;
    public ChannelFuture serverChannelFuture = null;

    public NettyServer(int port)
    {
        this.port = port;
        dataServer = new DataServer(this);
    }

    public void run() throws Exception
    {
        incomingLoopGroup = new NioEventLoopGroup();
        workerLoopGroup = new NioEventLoopGroup();
        dataEventExecutorGroup = new DefaultEventExecutorGroup(5);
        dataEventExecutorGroup2 = new DefaultEventExecutorGroup(5);
        try
        {
            ChannelInitializer<SocketChannel> channelInitializer =
                new ChannelInitializer<SocketChannel>()
                {
                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception
                    {
                        ch.pipeline().addLast(new MessageByteDecoder());
                        ch.pipeline().addLast(new MessageByteEncoder());
                        ch.pipeline().addLast(dataEventExecutorGroup, new DataServerInboundHandler(dataServer, NettyServer.this));
                        ch.pipeline().addLast(dataEventExecutorGroup2, new DataServerDataHandler(dataServer));
                    }
                };

            // bootstrap the server
            serverBootstrap = new ServerBootstrap();
            serverBootstrap.group(incomingLoopGroup, workerLoopGroup)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(channelInitializer)
                    .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .option(ChannelOption.TCP_NODELAY, true)
                    .childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024)
                    .childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024)
                    .childOption(ChannelOption.SO_KEEPALIVE, true);

            serverChannelFuture = serverBootstrap.bind(port).sync();
            serverChannelFuture.channel().closeFuture().sync();
        }
        finally
        {
            incomingLoopGroup.shutdownGracefully();
            workerLoopGroup.shutdownGracefully();
        }
    }
}
The client class:
public class NettyClient
{
    Bootstrap clientBootstrap = null;
    EventLoopGroup workerLoopGroup = null;
    String serverHost = null;
    int serverPort = -1;
    ChannelFuture clientFutureChannel = null;
    DataClient dataClient = null;
    DefaultEventExecutorGroup dataEventExecutorGroup = new DefaultEventExecutorGroup(5);
    DefaultEventExecutorGroup dataEventExecutorGroup2 = new DefaultEventExecutorGroup(5);

    public NettyClient(String serverHost, int serverPort)
    {
        this.serverHost = serverHost;
        this.serverPort = serverPort;
    }

    public void run() throws Exception
    {
        workerLoopGroup = new NioEventLoopGroup();
        try
        {
            this.dataClient = new DataClient();
            ChannelInitializer<SocketChannel> channelInitializer =
                new ChannelInitializer<SocketChannel>()
                {
                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception
                    {
                        ch.pipeline().addLast(new MessageByteDecoder());
                        ch.pipeline().addLast(new MessageByteEncoder());
                        ch.pipeline().addLast(dataEventExecutorGroup, new ClientInboundHandler(dataClient, NettyClient.this));
                        ch.pipeline().addLast(dataEventExecutorGroup2, new ClientDataHandler(dataClient));
                    }
                };

            clientBootstrap = new Bootstrap();
            clientBootstrap.group(workerLoopGroup);
            clientBootstrap.channel(NioSocketChannel.class);
            clientBootstrap.option(ChannelOption.SO_KEEPALIVE, true);
            clientBootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
            clientBootstrap.option(ChannelOption.TCP_NODELAY, true);
            clientBootstrap.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
            clientBootstrap.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
            clientBootstrap.handler(channelInitializer);

            clientFutureChannel = clientBootstrap.connect(serverHost, serverPort).sync();
            clientFutureChannel.channel().closeFuture().sync();
        }
        finally
        {
            workerLoopGroup.shutdownGracefully();
        }
    }
}
The Message class:
public class Message implements Serializable
{
    public static final byte MSG_FIELD = 0;
    public static final byte MSG_HELLO = 1;
    public static final byte MSG_LOG = 2;
    public static final byte MSG_FIELD_RESPONSE = 3;
    public static final byte MSG_MAP_KEY_VALUE = 4;
    public static final byte MSG_STATS_FILE = 5;
    public static final byte MSG_SHUTDOWN = 6;

    public byte msgID;
    public byte msgType;
    public String key;
    public String value;
    public byte method;
    public byte id;
}
The decoder:
public class MessageByteDecoder extends ByteToMessageDecoder
{
    private Kryo kryoCodec = new Kryo();
    private int contentSize = 0;

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) //throws Exception
    {
        // read header; require the 4-byte length prefix only while we still need it,
        // otherwise a message body shorter than 4 bytes could stall the decoder
        if (contentSize == 0) {
            if (buffer.readableBytes() < 4) // we need at least the integer length prefix
                return;
            contentSize = buffer.readInt();
        }
        if (buffer.readableBytes() < contentSize)
            return;

        // read content
        byte[] buf = new byte[contentSize];
        buffer.readBytes(buf);
        Input in = new Input(buf, 0, buf.length);
        out.add(kryoCodec.readObject(in, Message.class));
        contentSize = 0;
    }
}
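For reference, here is a minimal sketch of the ReplayingDecoder variant mentioned in the question (which reportedly made no speed difference); the class name is an illustrative assumption, and it assumes the same length-prefixed Kryo framing:

import java.util.List;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;

public class ReplayingMessageDecoder extends ReplayingDecoder<Void>
{
    private final Kryo kryoCodec = new Kryo();

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out)
    {
        // ReplayingDecoder transparently replays this call when too few bytes
        // have arrived, so no manual readableBytes() bookkeeping is needed
        int contentSize = in.readInt();
        byte[] buf = new byte[contentSize];
        in.readBytes(buf);
        out.add(kryoCodec.readObject(new Input(buf, 0, buf.length), Message.class));
    }
}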
The encoder:
public class MessageByteEncoder extends MessageToByteEncoder<Message>
{
    Kryo kryoCodec = new Kryo();

    public MessageByteEncoder()
    {
        super(false);
    }

    @Override
    protected void encode(ChannelHandlerContext ctx, Message msg, ByteBuf out) throws Exception
    {
        // note: this assumes 'out' is a heap buffer; ByteBuf.array() throws
        // UnsupportedOperationException for direct buffers
        int offset = out.arrayOffset() + out.writerIndex();
        byte[] inArray = out.array();
        Output kryoOutput = new OutputWithOffset(inArray, inArray.length, offset + 4);
        // serialize message content
        kryoCodec.writeObject(kryoOutput, msg);
        // write length of the message content at the beginning of the array
        out.writeInt(kryoOutput.position());
        out.writerIndex(out.writerIndex() + kryoOutput.position());
    }
}
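For comparison, a minimal sketch of the direct ByteBuf.writeBytes(..) serialization mentioned in the question; the class name and initial buffer size are illustrative assumptions. Unlike the array-based encoder above, this variant does not require the outbound buffer to expose a backing array:

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Output;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;

public class MessageWriteBytesEncoder extends MessageToByteEncoder<Message>
{
    private final Kryo kryoCodec = new Kryo();

    @Override
    protected void encode(ChannelHandlerContext ctx, Message msg, ByteBuf out)
    {
        // serialize into a temporary, growable Kryo buffer
        Output kryoOutput = new Output(256, -1);
        kryoCodec.writeObject(kryoOutput, msg);
        // length prefix followed by the serialized content
        out.writeInt(kryoOutput.position());
        out.writeBytes(kryoOutput.getBuffer(), 0, kryoOutput.position());
    }
}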
The client's business logic, which runs in the DefaultEventExecutorGroup:
public class DataClient
{
    ChannelHandlerContext ctx;
    // ...

    public void processData()
    {
        // ...
        while ((line = br.readLine()) != null)
        {
            // ...
            process = new CountDownLatch(columns.size());
            for (final Column c : columns)
            {
                // sending column data to the server for processing
                ctx.channel().eventLoop().execute(new Runnable() {
                    @Override
                    public void run() {
                        ctx.writeAndFlush(Message.createMessage(msgID, processID, c.key, c.value));
                    }
                });
            }
            // block until all the processed column fields of this row are returned from the server
            process.await();
            // write processed line to file ...
        }
        // ...
    }
    // ...
}
The client's message handling:
public class ClientInboundHandler extends ChannelInboundHandlerAdapter
{
    DataClient dataClient = null;
    // ...

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg)
    {
        // dispatch the message to the listeners
        Message m = (Message) msg;
        switch (m.msgType)
        {
            case Message.MSG_FIELD_RESPONSE: // message with processed data is received from the server
                // decreases the 'process' CountDownLatch in the processData() method
                dataClient.setProcessingResult(m.msgID, m.value);
                break;
            // ...
        }
        // forward the message to the pipeline
        ctx.fireChannelRead(msg);
    }
    // ...
}
The server's message handling:
public class ServerInboundHandler extends ChannelInboundHandlerAdapter
{
    private DataServer dataServer = null;
    // ...

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object obj) throws Exception
    {
        Message msg = (Message) obj;
        switch (msg.msgType)
        {
            case Message.MSG_FIELD:
                dataServer.processField(msg, ctx);
                break;
            // ...
        }
        ctx.fireChannelRead(msg);
    }
    // ...
}
The server's business logic, which runs in the DefaultEventExecutorGroup:
public class DataServer
{
    // ...
    public void processField(final Message msg, final ChannelHandlerContext context)
    {
        context.executor().submit(new Runnable()
        {
            @Override
            public void run()
            {
                String processedValue = (String) processField(msg.key, msg.value);
                final Message responseToClient = Message.createResponseFieldMessage(msg.msgID, processedValue);
                // send processed data to the client
                context.channel().eventLoop().submit(new Runnable() {
                    @Override
                    public void run() {
                        context.writeAndFlush(responseToClient);
                    }
                });
            }
        });
    }
    // ...
}
Answer 0 (score: 1)
Try CentOS 7.0.
I had a similar problem: the same Netty 4 program ran very fast (about 40k msg/s) on CentOS 7.0, but could not write more than about 8k msg/s on CentOS 6.3 and 6.5 (I have not tried 6.4).
Answer 1 (score: 0)
There is no need to submit anything to the EventLoop. Just call Channel.writeAndFlush(...) directly in DataClient and DataServer.
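A minimal sketch of this change in DataClient.processData(), reusing the fields from the question (illustrative only):

for (final Column c : columns)
{
    // Channel.writeAndFlush(...) is safe to call from any thread; Netty hands
    // the write over to the channel's event loop internally, so the extra
    // eventLoop().execute(...) hop is unnecessary
    ctx.channel().writeAndFlush(Message.createMessage(msgID, processID, c.key, c.value));
}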