Netty facing heavy packet loss and increasing latency

Time: 2012-12-10 19:47:34

Tags: tcp netty

I am using Netty 3.5.11 with JDK 1.7 on Ubuntu to receive a very high rate of stock price updates. The messages being sent are in JSON format. The data comes from topic subscriptions on a Redis server, with one subscriber per symbol. The Channel object is passed to the multiple subscribers, and they write to the client whenever data is received.
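For context, here is a minimal sketch of the per-symbol subscriber described above; the class and field names are illustrative and the Redis client callback is abstracted away, so this is not the actual project code:

    import org.jboss.netty.channel.Channel;

    public class SymbolSubscriber
    {
        // Netty channel handed to this subscriber when the client connects.
        private final Channel clientChannel;

        public SymbolSubscriber(Channel clientChannel)
        {
            this.clientChannel = clientChannel;
        }

        // Invoked by the Redis pub/sub client whenever a price update arrives on this symbol's topic.
        public void onMessage(String jsonUpdate)
        {
            // The JSON update is forwarded straight to the connected client as a delimited string.
            clientChannel.write(jsonUpdate + "\n");
        }
    }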

The volume of data currently received is about 25,000 records in 2 minutes, with each record averaging roughly 500 bytes.
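For scale, that is roughly 200 messages per second and on the order of 100 KB/s per channel; a quick back-of-envelope check (figures are approximate):

    public class ThroughputEstimate
    {
        public static void main(String[] args)
        {
            int records = 25000;        // records received
            int windowSeconds = 2 * 60; // over two minutes
            int avgBytes = 500;         // average record size in bytes

            double msgsPerSec = (double) records / windowSeconds; // ~208 msg/s
            double bytesPerSec = msgsPerSec * avgBytes;           // ~104,000 B/s
            System.out.printf("%.0f msg/s, %.0f KB/s%n", msgsPerSec, bytesPerSec / 1000);
        }
    }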

During a test run, about 7,500-8,000 records were dropped because the channel was not writable. How can I avoid this?

I have also noticed that the latency increases steadily, so updates end up being received long after they were sent. This happens when I use a BufferedWriteHandler to avoid dropping packets.
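For reference, a Netty 3 channel reports isWritable() == false once the pending write data crosses the configured high water mark and becomes writable again once it drains below the low water mark. The following is only a minimal illustration of reacting to that signal, not the handler I am actually using:

    import org.jboss.netty.channel.ChannelHandlerContext;
    import org.jboss.netty.channel.ChannelStateEvent;
    import org.jboss.netty.channel.SimpleChannelHandler;

    public class WritabilityAwareHandler extends SimpleChannelHandler
    {
        @Override
        public void channelInterestChanged(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception
        {
            if (e.getChannel().isWritable())
            {
                // Pending writes have drained below the low water mark: safe to resume writing.
            }
            else
            {
                // Pending writes exceeded the high water mark: back off instead of dropping messages.
            }
            super.channelInterestChanged(ctx, e);
        }
    }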

Below are the options I set on the bootstrap:

executionHandler = new ExecutionHandler(
            new OrderedMemoryAwareThreadPoolExecutor(Runtime.getRuntime().availableProcessors() * 2, 1000000, 10000000, 100,
            TimeUnit.MILLISECONDS));
serverBootStrap.setPipelineFactory(new ChannelPipelineFactory()
    {
        @Override
        public ChannelPipeline getPipeline() throws Exception
        {
            return Channels.pipeline(new PortUnificationServerHandler(getConfiguration(), executionHandler));
        }
    });

    serverBootStrap.setOption("child.tcpNoDelay", true);
    serverBootStrap.setOption("tcpNoDelay", true);
    serverBootStrap.setOption("child.keepAlive", true);
    serverBootStrap.setOption("child.reuseAddress", true);
    //setting buffer size can improve I/O
    serverBootStrap.setOption("child.sendBufferSize", 16777216);
    serverBootStrap.setOption("receiveBufferSize", 16777216);//1048576);
    // better to have a receive buffer size predictor
    serverBootStrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory(1024, 1024 * 16, 16777216));//1048576));

    // if the server is sending 1000 messages per sec, optimal write buffer water marks will
    // prevent unnecessary throttling; check the NioSocketChannelConfig docs
    serverBootStrap.setOption("backlog", 1000);
    serverBootStrap.setOption("sendBufferSize", 16777216);//1048576);
    serverBootStrap.setOption("writeBufferLowWaterMark", 1024 * 1024 * 25);
    serverBootStrap.setOption("writeBufferHighWaterMark", 1024 * 1024 * 50);

The pipeline and handler class:

public class PortUnificationServerHandler extends FrameDecoder
{

private AppConfiguration appConfiguration;
private final ExecutionHandler executionHandler;

public PortUnificationServerHandler(AppConfiguration pAppConfiguration, ExecutionHandler pExecutionHandler)
{
    appConfiguration = pAppConfiguration;
    this.executionHandler = pExecutionHandler;
}

@Override
protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer) throws Exception
{

    String lRequest = buffer.toString(CharsetUtil.UTF_8);
    if (ConnectionServiceHelper.isValidJSON(lRequest))
    {
        ObjectMapper lObjectMapper = new ObjectMapper();
        StringReader lStringReader = new StringReader(lRequest);
        JsonNode lNode = lObjectMapper.readTree(lStringReader);
        if (lNode.get(Constants.REQUEST_TYPE).asText().trim().equalsIgnoreCase(Constants.LOGIN_REQUEST))
        {
            JsonNode lDataNode1 = lNode.get(Constants.REQUEST_DATA);
            LoginRequest lLogin = lObjectMapper.treeToValue(lDataNode1, LoginRequest.class);

            if (lLogin.getCompress() != null)
            {
                if (lLogin.getCompress().trim().equalsIgnoreCase(Constants.COMPRESS_FLAG_TRUE))
                {
                    enableJSON(ctx);
                    enableGzip(ctx);
                    ctx.getPipeline().remove(this);
                }
                else
                {
                    enableJSON(ctx);
                    ctx.getPipeline().remove(this);
                }
            }
            else
            {
                enableJSON(ctx);
                ctx.getPipeline().remove(this);

            }
        }
    }

    // Forward the current read buffer as is to the new handlers.
    return buffer.readBytes(buffer.readableBytes());
}

private void enableJSON(ChannelHandlerContext ctx)
{
    ChannelPipeline pipeline = ctx.getPipeline();

   boolean lHandlerExists = pipeline.getContext("bufferedwriter") != null;

    if (!lHandlerExists)
    {
        pipeline.addFirst("bufferedwriter", new MyBufferedWriteHandler()); // 80960
    }

    boolean lHandlerExists = pipeline.getContext("framer") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("framer", new DelimiterBasedFrameDecoder(65535,
                new ChannelBuffer[]
                {
                    ChannelBuffers.wrappedBuffer(
                    new byte[]
                    {
                        '\n'
                    })
                }));
    }

    lHandlerExists = pipeline.getContext("decoder") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("decoder", new StringDecoder(CharsetUtil.UTF_8));
    }

    lHandlerExists = pipeline.getContext("encoder") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("encoder", new StringEncoder(CharsetUtil.UTF_8));
    }

    lHandlerExists = pipeline.getContext("executor") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("executor", executionHandler);
    }

    lHandlerExists = pipeline.getContext("handler") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("handler", new ConnectionServiceUpStreamHandler(appConfiguration));
    }

    lHandlerExists = pipeline.getContext("unite") != null;
    if (!lHandlerExists)
    {
        pipeline.addLast("unite", new PortUnificationServerHandler(appConfiguration, executionHandler));
    }
}

private void enableGzip(ChannelHandlerContext ctx)
{
    ChannelPipeline pipeline = ctx.getPipeline();

    //pipeline.remove("decoder");
    //pipeline.addLast("decoder", new MyStringDecoder(CharsetUtil.UTF_8, true));
    //pipeline.addLast("compress", new CompressionHandler(80, "gzipdeflater"));
    boolean lHandlerExists = pipeline.getContext("encoder") != null;
    if (lHandlerExists)
    {
        pipeline.remove("encoder");
    }


    lHandlerExists = pipeline.getContext("gzipdeflater") != null;
    if (!lHandlerExists)
    {
        pipeline.addBefore("executor", "gzipdeflater", new ZlibEncoder(ZlibWrapper.GZIP));
    }


    lHandlerExists = pipeline.getContext("lengthprepender") != null;
    if (!lHandlerExists)
    {
        pipeline.addAfter("gzipdeflater", "lengthprepender", new LengthFieldPrepender(4));
    }

}

}
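For clarity, the decoder above expects the client's first frame to be a login request whose JSON carries a request type, a data object, and an optional compress flag. The standalone example below shows that kind of payload being parsed; the literal key names ("requestType", "requestData", "compress", "login") are assumptions standing in for the Constants references, and Jackson 2 coordinates are used here:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class LoginRequestExample
    {
        public static void main(String[] args) throws Exception
        {
            // Hypothetical first frame; the real field names live behind the Constants class.
            String firstFrame = "{\"requestType\":\"login\","
                    + "\"requestData\":{\"user\":\"demo\",\"compress\":\"true\"}}";

            ObjectMapper mapper = new ObjectMapper();
            JsonNode root = mapper.readTree(firstFrame);

            // Mirrors the branch in decode(): the request type decides whether the pipeline
            // is reconfigured, and the compress flag decides whether gzip handlers are added.
            String requestType = root.get("requestType").asText();
            String compress = root.get("requestData").path("compress").asText();
            System.out.println(requestType + " / compress=" + compress);
        }
    }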

The BufferedWriteHandler:

public class MyBufferedWriteHandler extends BufferedWriteHandler
{

private final AtomicLong bufferSize = new AtomicLong();
final Logger logger = LoggerFactory.getLogger(getClass());


 public MyBufferedWriteHandler() {
     // Enable consolidation by default.
     super(true);
 }

@Override
public void writeRequested(ChannelHandlerContext ctx, MessageEvent e) throws Exception
{
     ChannelBuffer data = (ChannelBuffer) e.getMessage();

    if (e.getChannel().isWritable())
    {
        long newBufferSize = bufferSize.get();
        // Flush anything that was buffered while the channel was not writable.
        if (newBufferSize > 0)
        {
            flush();
            bufferSize.set(0);
        }
        ctx.sendDownstream(e);
    }
    else
    {
        logger.warn( "Buffering data for : " + e.getChannel().getRemoteAddress() );
        super.writeRequested(ctx, e);
        bufferSize.addAndGet(data.readableBytes());
    }
}

@Override
public void channelInterestChanged(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception
{
    if (e.getChannel().isWritable())
    {
        flush();
    }
}
}
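Since latency builds up while updates sit in the BufferedWriteHandler queue, one option is to also flush on a timer so buffered data never waits for the next write. This is only a hedged sketch; the scheduler and the 50 ms interval are assumptions, not something taken from the project:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import org.jboss.netty.handler.queue.BufferedWriteHandler;

    public class PeriodicFlusher
    {
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        public void start(final BufferedWriteHandler bufferedWriter)
        {
            scheduler.scheduleAtFixedRate(new Runnable()
            {
                @Override
                public void run()
                {
                    // Drain whatever has been buffered so far instead of waiting for the next write.
                    bufferedWriter.flush();
                }
            }, 50, 50, TimeUnit.MILLISECONDS);
        }
    }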

The function in the Subscriber class that writes the data:

 public void writeToClient(Channel pClientChannel, String pMessage) throws IOException
{
    String lMessage = pMessage;
    if (pClientChannel.isWritable())
    {

            lMessage += Constants.RESPONSE_DELIMITER;
            pClientChannel.write(lMessage);

    }
    else
    {
         logger.warn(DroppedCounter++ + " dropped : " + pMessage);
    }
}
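For completeness, Channel.write() in Netty 3 returns a ChannelFuture, so failed writes can also be observed asynchronously instead of only counting drops. A minimal, hedged variation of the method above (the class name and counter are illustrative, and "\n" stands in for Constants.RESPONSE_DELIMITER):

    import java.util.concurrent.atomic.AtomicLong;

    import org.jboss.netty.channel.Channel;
    import org.jboss.netty.channel.ChannelFuture;
    import org.jboss.netty.channel.ChannelFutureListener;

    public class ClientWriter
    {
        private final AtomicLong failedWrites = new AtomicLong();

        public void writeToClient(Channel clientChannel, String message)
        {
            // "\n" stands in for Constants.RESPONSE_DELIMITER from the original code.
            ChannelFuture future = clientChannel.write(message + "\n");
            future.addListener(new ChannelFutureListener()
            {
                @Override
                public void operationComplete(ChannelFuture f)
                {
                    if (!f.isSuccess())
                    {
                        // Count writes that did not complete successfully.
                        failedWrites.incrementAndGet();
                    }
                }
            });
        }
    }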

I have implemented several of the suggestions I read on Stack Overflow and other sites, but I have not managed to solve the problem.

Can you please suggest what I am missing?

Thanks

0 Answers:

No answers