TPL Dataflow: process the N latest messages

Date: 2017-07-02 17:49:26

Tags: c# .net tpl-dataflow

I'm trying to create some kind of queue that will process only the N most recently received messages. Right now I have this:

private static void SetupMessaging()
{
    _messagingBroadcastBlock = new BroadcastBlock<string>(msg => msg, new ExecutionDataflowBlockOptions
    {
        //BoundedCapacity = 1,
        EnsureOrdered = true,
        MaxDegreeOfParallelism = 1,
        MaxMessagesPerTask = 1
    });

    _messagingActionBlock = new ActionBlock<string>(msg =>
    {
        Console.WriteLine(msg);
        Thread.Sleep(5000);
    }, new ExecutionDataflowBlockOptions
    {
        BoundedCapacity = 2,
        EnsureOrdered = true,
        MaxDegreeOfParallelism = 1,
        MaxMessagesPerTask = 1    
    });

    _messagingBroadcastBlock.LinkTo(_messagingActionBlock, new DataflowLinkOptions { PropagateCompletion = true });
    _messagingBroadcastBlock.LinkTo(DataflowBlock.NullTarget<string>());
}

The problem is that if I post 1, 2, 3, 4, 5, I get 1, 2, 5, but I want it to be 1, 4, 5. Any suggestions are welcome.
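For illustration, posting the test sequence looks roughly like this (a simplified sketch, not the exact sending code):

for (var i = 1; i <= 5; i++)
{
    _messagingBroadcastBlock.Post(i.ToString());
}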
UPD 1

I was able to get the following solution working:

class FixedCapacityActionBlock<T>
{
    private readonly ActionBlock<CancellableMessage<T>> _actionBlock;

    private readonly ConcurrentQueue<CancellableMessage<T>> _inputCollection = new ConcurrentQueue<CancellableMessage<T>>();

    private readonly int _maxQueueSize;

    private readonly object _syncRoot = new object();

    public FixedCapacityActionBlock(Action<T> act, ExecutionDataflowBlockOptions opt)
    {
        var options = new ExecutionDataflowBlockOptions
        {
            EnsureOrdered = opt.EnsureOrdered,
            CancellationToken = opt.CancellationToken,
            MaxDegreeOfParallelism = opt.MaxDegreeOfParallelism,
            MaxMessagesPerTask = opt.MaxMessagesPerTask,
            NameFormat = opt.NameFormat,
            SingleProducerConstrained = opt.SingleProducerConstrained,
            TaskScheduler = opt.TaskScheduler,
            //we intentionally ignore this value
            //BoundedCapacity = opt.BoundedCapacity
        };
        _actionBlock = new ActionBlock<CancellableMessage<T>>(cmsg =>
        {
            if (cmsg.CancellationTokenSource.IsCancellationRequested)
            {
                return;
            }

            act(cmsg.Message);
        }, options);

        _maxQueueSize = opt.BoundedCapacity;
    }

    public bool Post(T msg)
    {
        var fullMsg = new CancellableMessage<T>(msg);

        //what if next task starts here?
        lock (_syncRoot)
        {
            _inputCollection.Enqueue(fullMsg);

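            // Keep the head of the tracking queue plus the newest (_maxQueueSize - 1)
            // entries; cancel everything in between, then trim the same number of
            // entries off the front of the queue.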
            var itemsToDrop = _inputCollection.Skip(1).Except(_inputCollection.Skip(_inputCollection.Count - _maxQueueSize + 1));

            foreach (var item in itemsToDrop)
            {
                item.CancellationTokenSource.Cancel();
                CancellableMessage<T> temp;
                _inputCollection.TryDequeue(out temp);
            }

            return _actionBlock.Post(fullMsg);
        }
    }
}

class CancellableMessage<T> : IDisposable
{
    public CancellationTokenSource CancellationTokenSource { get; set; }

    public T Message { get; set; }

    public CancellableMessage(T msg)
    {
        CancellationTokenSource = new CancellationTokenSource();
        Message = msg;
    }

    public void Dispose()
    {
        CancellationTokenSource?.Dispose();
    }
}

Although this works, it honestly looks dirty, and it's probably not thread safe either.
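Since the wrapper doesn't implement ITargetBlock&lt;T&gt;, it can't be linked from the BroadcastBlock and has to be fed through Post directly. A minimal usage sketch (the exact wiring here is an assumption, not the actual application code):

var fixedBlock = new FixedCapacityActionBlock<string>(msg =>
{
    Console.WriteLine(msg);
    Thread.Sleep(5000);
}, new ExecutionDataflowBlockOptions
{
    BoundedCapacity = 2,
    MaxDegreeOfParallelism = 1
});

// Post the same sequence as before; older pending messages get cancelled
// instead of being processed.
for (var i = 1; i <= 5; i++)
{
    fixedBlock.Post(i.ToString());
}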

2 Answers

Answer 0 (score: 1)

Here is an implementation of a TransformBlock (plus an ActionBlock equivalent) that drops the oldest messages in its queue whenever newer messages arrive and the BoundedCapacity limit has been reached. It behaves much like a Channel configured with BoundedChannelFullMode.DropOldest.

public static IPropagatorBlock<TInput, TOutput>
    CreateTransformBlockDropOldest<TInput, TOutput>(
    Func<TInput, Task<TOutput>> transform,
    ExecutionDataflowBlockOptions dataflowBlockOptions = null,
    IProgress<TInput> droppedMessages = null)
{
    if (transform == null) throw new ArgumentNullException(nameof(transform));
    dataflowBlockOptions = dataflowBlockOptions ?? new ExecutionDataflowBlockOptions();

    var boundedCapacity = dataflowBlockOptions.BoundedCapacity;
    var cancellationToken = dataflowBlockOptions.CancellationToken;

    var queue = new Queue<TInput>(Math.Max(0, boundedCapacity));

    var outputBlock = new BufferBlock<TOutput>(new DataflowBlockOptions()
    {
        BoundedCapacity = boundedCapacity,
        CancellationToken = cancellationToken
    });

    if (boundedCapacity != DataflowBlockOptions.Unbounded)
        dataflowBlockOptions.BoundedCapacity = checked(boundedCapacity * 2);
    // After testing, at least boundedCapacity + 1 is required.
    // Make it double to be sure that all non-dropped messages will be processed.
    var transformBlock = new ActionBlock<object>(async _ =>
    {
        TInput item;
        lock (queue)
        {
            if (queue.Count == 0) return;
            item = queue.Dequeue();
        }
        var result = await transform(item).ConfigureAwait(false);
        await outputBlock.SendAsync(result, cancellationToken).ConfigureAwait(false);
    }, dataflowBlockOptions);
    dataflowBlockOptions.BoundedCapacity = boundedCapacity; // Restore initial value

    var inputBlock = new ActionBlock<TInput>(item =>
    {
        var droppedEntry = (Exists: false, Item: (TInput)default);
        lock (queue)
        {
            transformBlock.Post(null);
            if (queue.Count == boundedCapacity) droppedEntry = (true, queue.Dequeue());
            queue.Enqueue(item);
        }
        if (droppedEntry.Exists) droppedMessages?.Report(droppedEntry.Item);
    }, new ExecutionDataflowBlockOptions()
    {
        CancellationToken = cancellationToken
    });

    PropagateCompletion(inputBlock, transformBlock);
    PropagateFailure(transformBlock, inputBlock);
    PropagateCompletion(transformBlock, outputBlock);
    _ = transformBlock.Completion.ContinueWith(_ => { lock (queue) queue.Clear(); },
        TaskScheduler.Default);

    return DataflowBlock.Encapsulate(inputBlock, outputBlock);

    async void PropagateCompletion(IDataflowBlock source, IDataflowBlock target)
    {
        try { await source.Completion.ConfigureAwait(false); } catch { }
        var exception = source.Completion.IsFaulted ? source.Completion.Exception : null;
        if (exception != null) target.Fault(exception); else target.Complete();
    }
    async void PropagateFailure(IDataflowBlock source, IDataflowBlock target)
    {
        try { await source.Completion.ConfigureAwait(false); } catch { }
        if (source.Completion.IsFaulted) target.Fault(source.Completion.Exception);
    }
}

// Overload with synchronous lambda
public static IPropagatorBlock<TInput, TOutput>
    CreateTransformBlockDropOldest<TInput, TOutput>(
    Func<TInput, TOutput> transform,
    ExecutionDataflowBlockOptions dataflowBlockOptions = null,
    IProgress<TInput> droppedMessages = null)
{
    return CreateTransformBlockDropOldest(item => Task.FromResult(transform(item)),
        dataflowBlockOptions, droppedMessages);
}

// ActionBlock equivalent
public static ITargetBlock<TInput>
    CreateActionBlockDropOldest<TInput>(
    Func<TInput, Task> action,
    ExecutionDataflowBlockOptions dataflowBlockOptions = null,
    IProgress<TInput> droppedMessages = null)
{
    if (action == null) throw new ArgumentNullException(nameof(action));
    var block = CreateTransformBlockDropOldest<TInput, object>(
        async item => { await action(item).ConfigureAwait(false); return null; },
        dataflowBlockOptions, droppedMessages);
    block.LinkTo(DataflowBlock.NullTarget<object>());
    return block;
}

// ActionBlock equivalent with synchronous lambda
public static ITargetBlock<TInput>
    CreateActionBlockDropOldest<TInput>(
    Action<TInput> action,
    ExecutionDataflowBlockOptions dataflowBlockOptions = null,
    IProgress<TInput> droppedMessages = null)
{
    return CreateActionBlockDropOldest(
        item => { action(item); return Task.CompletedTask; },
        dataflowBlockOptions, droppedMessages);
}

The idea is to store the queued items in an auxiliary Queue, and to pass dummy (null) values to the internal ActionBlock&lt;object&gt;. That block ignores the item passed as its argument and instead takes an item from the queue, if there is one. A lock is used to ensure that all non-dropped items in the queue will eventually be processed (unless, of course, an exception occurs).

There is also an extra feature: the optional IProgress&lt;TInput&gt; droppedMessages argument allows you to receive a notification every time a message is dropped.

Usage example:

_messagingActionBlock = CreateActionBlockDropOldest<string>(msg =>
{
    Console.WriteLine($"Processing: {msg}");
    Thread.Sleep(5000);
}, new ExecutionDataflowBlockOptions
{
    BoundedCapacity = 2,
}, new Progress<string>(msg =>
{
    Console.WriteLine($"Message dropped: {msg}");
}));

Answer 1 (score: 0)

TPL Dataflow isn't a great fit for "last N messages", because it is meant to be a queue or pipeline (FIFO), not a stack (LIFO). Do you really need to use the dataflow library for this?

It would be much easier with a ConcurrentStack&lt;T&gt;: you simply introduce a producer task that pushes to the stack, and a consumer task that takes messages from the stack while the number of processed items is less than N (more about Producer-Consumer).
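A minimal sketch of that producer/consumer idea (the names, the fixed message count, and the termination condition are assumptions for illustration, not part of the original answer):

// Requires System.Collections.Concurrent, System.Threading and System.Threading.Tasks.
var stack = new ConcurrentStack<string>();

// Producer: pushes every incoming message onto the stack.
var producer = Task.Run(() =>
{
    for (var i = 1; i <= 5; i++)
    {
        stack.Push(i.ToString());
    }
});

// Consumer: always pops the most recently pushed message first,
// and stops after N messages have been processed.
const int N = 3;
var consumer = Task.Run(() =>
{
    var processed = 0;
    while (processed < N)
    {
        if (stack.TryPop(out var msg))
        {
            Console.WriteLine($"Processing: {msg}");
            processed++;
        }
        else
        {
            Thread.Sleep(10); // nothing to pop yet
        }
    }
});

Task.WaitAll(producer, consumer);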

If you do need the dataflow library, you can use it inside the consumer task to start processing the latest messages, but not in the producer, because that isn't really what it was designed for. There are also some other libraries with an event-based architecture, which may be a better fit for your problem.