Why is AsParallel() slower than foreach in this case?

Date: 2011-08-24 14:08:26

Tags: c# linq performance loops parallel-processing

I pull data in this format from Excel:

 product1   | unnamedcol2 | product2  | unnamedcol4 | product3  | unnamedcol6 |
-------------------------------------------------------------------------------
 @1foo      |        1.10 | @1foo     |         0.3 | @1foo     |         0.3
 @2foo      |        1.00 | @2foo     |           2 | @2foo     |
 @3foo      |        1.52 | @3foo     |        2.53 | @3foo     |
 @4foo      |        1.47 |           |             | @4foo     |        1.31
 @5foo      |        1.49 |           |             | @5foo     |        1.31

The file uses all 255 fields. Using dapper-dot-net I fetch the data with this code:

IEnumerable<IDictionary<string, object>> excelDataRaw =
                conn.Query(string.Format("select * from {0}", table)).Cast<IDictionary<string, object>>();

I pass this data to the test methods below. The data should come back as an IEnumerable of IDictionaries, where each key is a product and each value is an IDictionary in which each key is a value from the product column and the corresponding value is the value from the unnamedcol column to the right of that product column, like this:

var excelDataRefined = new List<IDictionary<string, IDictionary<string, decimal>>>();
excelDataRefined.Add(new Dictionary<string, IDictionary<string, decimal>>());
excelDataRefined[0].Add( "product", new Dictionary<string, decimal>());
excelDataRefined[0]["product"].Add("@1foo", 1.1m);

The methods:

private static Dictionary<string, IDictionary<string, decimal>> Benchmark_foreach(IEnumerable<IDictionary<string, object>> excelDataRaw)
{
    Console.WriteLine("1. Using foreach");
    var watch = new Stopwatch();
    watch.Start();

    List<string> headers = excelDataRaw.Select(dictionary => dictionary.Keys).First().ToList();
    // Every other header is a product column: the isEven flip-flop keeps the
    // headers at even indexes and drops the unnamed price columns.
    bool isEven = false;
    List<string> products = headers.Where(h => isEven = !isEven).ToList();
    var dates = new List<IEnumerable<object>>();
    var prices = new List<IEnumerable<object>>();

    foreach (string field in headers)
    {
        string product1 = field;
        if (headers.IndexOf(field) % 2 == 0)
        {
            dates.Add(
                excelDataRaw.AsParallel().AsOrdered().Select(col => col[product1]).Where(row => row != null));
        }

        if (headers.IndexOf(field) % 2 == 1)
        {
            prices.Add(
                excelDataRaw.AsParallel().AsOrdered().Select(col => col[product1] ?? 0m).Take(dates.Last().Count()));
        }
    }

    watch.Stop();
    Console.WriteLine("Rearange the data in: {0}s", watch.Elapsed.TotalSeconds);
    watch.Restart();

    var excelDataRefined = new Dictionary<string, IDictionary<string, decimal>>();
    foreach (IEnumerable<object> datelist in dates)
    {
        decimal num;
        IEnumerable<object> datelist1 = datelist;
        IEnumerable<object> pricelist =
            prices[dates.IndexOf(datelist1)].Select(value => value ?? 0m).Where(
                content => decimal.TryParse(content.ToString(), out num));
        Dictionary<string, decimal> dict =
            datelist1.Zip(pricelist, (k, v) => new { k, v }).ToDictionary(
                x => (string)x.k, x => decimal.Parse(x.v.ToString()));

        if (!excelDataRefined.ContainsKey(products[dates.IndexOf(datelist1)]))
        {
            excelDataRefined.Add(products[dates.IndexOf(datelist1)], dict);
        }
    }

    watch.Stop();
    Console.WriteLine("Zipped the data in: {0}s", watch.Elapsed.TotalSeconds);

    return excelDataRefined;
}

private static Dictionary<string, IDictionary<string, decimal>> Benchmark_AsParallel(IEnumerable<IDictionary<string, object>> excelDataRaw)
{
    Console.WriteLine("2. Using AsParallel().AsOrdered().ForAll");
    var watch = new Stopwatch();
    watch.Start();

    List<string> headers = excelDataRaw.Select(dictionary => dictionary.Keys).First().ToList();
    bool isEven = false;
    List<string> products = headers.Where(h => isEven = !isEven).ToList();
    var dates = new List<IEnumerable<object>>();
    var prices = new List<IEnumerable<object>>();

    headers.AsParallel().AsOrdered().ForAll(
        field =>
        dates.Add(
            excelDataRaw.AsParallel().AsOrdered().TakeWhile(x => headers.IndexOf(field) % 2 == 0).Select(
                col => col[field]).Where(row => row != null).ToList()));
    headers.AsParallel().AsOrdered().ForAll(
        field =>
        prices.Add(
            excelDataRaw.AsParallel().AsOrdered().TakeWhile(x => headers.IndexOf(field) % 2 == 1).Select(
                col => col[field] ?? 0m).Take(256).ToList()));
    dates.RemoveAll(x => x.Count() == 0);
    prices.RemoveAll(x => x.Count() == 0);

    watch.Stop();
    Console.WriteLine("Rearange the data in: {0}s", watch.Elapsed.TotalSeconds);
    watch.Restart();

    var excelDataRefined = new Dictionary<string, IDictionary<string, decimal>>();
    foreach (IEnumerable<object> datelist in dates)
    {
        decimal num;
        IEnumerable<object> datelist1 = datelist;
        IEnumerable<object> pricelist =
            prices[dates.IndexOf(datelist1)].Select(value => value ?? 0m).Where(
                content => decimal.TryParse(content.ToString(), out num));
        Dictionary<string, decimal> dict =
            datelist1.Zip(pricelist, (k, v) => new { k, v }).ToDictionary(
                x => (string)x.k, x => decimal.Parse(x.v.ToString()));

        if (!excelDataRefined.ContainsKey(products[dates.IndexOf(datelist1)]))
        {
            excelDataRefined.Add(products[dates.IndexOf(datelist1)], dict);
        }
    }

    watch.Stop();
    Console.WriteLine("Zipped the data in: {0}s", watch.Elapsed.TotalSeconds);

    return excelDataRefined;
}

private static Dictionary<string, IDictionary<string, decimal>> Benchmark_ForEach(IEnumerable<IDictionary<string, object>> excelDataRaw)
{
    Console.WriteLine("3. Using ForEach");
    var watch = new Stopwatch();
    watch.Start();

    List<string> headers = excelDataRaw.Select(dictionary => dictionary.Keys).First().ToList();
    bool isEven = false;
    List<string> products = headers.Where(h => isEven = !isEven).ToList();
    var dates = new List<IEnumerable<object>>();
    var prices = new List<IEnumerable<object>>();

    headers.ForEach(
        field =>
        dates.Add(
            excelDataRaw.TakeWhile(x => headers.IndexOf(field) % 2 == 0).Select(col => col[field]).Where(
                row => row != null).ToList()));
    headers.ForEach(
        field =>
        prices.Add(
            excelDataRaw.TakeWhile(x => headers.IndexOf(field) % 2 == 1).Select(col => col[field] ?? 0m).
            Take(256).ToList()));
    dates.RemoveAll(x => x.Count() == 0);
    prices.RemoveAll(x => x.Count() == 0);

    watch.Stop();
    Console.WriteLine("Rearange the data in: {0}s", watch.Elapsed.TotalSeconds);
    watch.Restart();

    var excelDataRefined = new Dictionary<string, IDictionary<string, decimal>>();
    foreach (IEnumerable<object> datelist in dates)
    {
        decimal num;
        IEnumerable<object> datelist1 = datelist;
        IEnumerable<object> pricelist =
            prices[dates.IndexOf(datelist1)].Select(value => value ?? 0m).Where(
                content => decimal.TryParse(content.ToString(), out num));
        Dictionary<string, decimal> dict =
            datelist1.Zip(pricelist, (k, v) => new { k, v }).ToDictionary(
                x => (string)x.k, x => decimal.Parse(x.v.ToString()));

        if (!excelDataRefined.ContainsKey(products[dates.IndexOf(datelist1)]))
        {
            excelDataRefined.Add(products[dates.IndexOf(datelist1)], dict);
        }
    }

    watch.Stop();
    Console.WriteLine("Zipped the data in: {0}s", watch.Elapsed.TotalSeconds);

    return excelDataRefined;
}
  • Benchmark_foreach needs approx. 3.5s to rearrange and 3s to zip the data.
  • Benchmark_AsParallel needs approx. 12s to rearrange and 0.005s to zip the data.
  • Benchmark_ForEach needs approx. 16s to rearrange and 0.005s to zip the data.

Why does it behave like this? I expected AsParallel to be the fastest because it executes in parallel rather than sequentially. How can I optimize this?

3 Answers:

Answer 0 (score: 8)

To do parallel computation you must have multiple processors or cores; otherwise you are just queuing up tasks in the thread pool that wait for a CPU. That is, AsParallel on a single-core machine is sequential execution plus the overhead of the thread pool and thread context switches. Even on a dual-core machine you may not get both cores, because plenty of other things are running on the same machine.

.AsParallel() only really becomes useful when you have long-running or blocking (I/O) tasks, where the operating system can suspend the blocked thread and let another thread run.
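
A minimal sketch of that point, assuming the same excelDataRaw variable from the question and using a trivial filter only as placeholder work: check the core count up front, and cap PLINQ's degree of parallelism explicitly when you do parallelize.

// How many cores PLINQ can actually use on this machine.
Console.WriteLine("Logical processors: {0}", Environment.ProcessorCount);

// On a single-core machine this is sequential work plus partitioning and
// thread-pool overhead; on a multi-core machine it may spread across cores.
var nonEmptyRows = excelDataRaw
    .AsParallel()
    .WithDegreeOfParallelism(Environment.ProcessorCount) // never more threads than cores
    .Where(row => row.Values.Any(v => v != null))
    .ToList();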

Answer 1 (score: 4)

Creating additional threads and managing each thread's workload carries overhead. If you only have a limited amount of work, the overhead of creating extra threads, switching tasks between threads, work stealing, redistributing work between threads and so on can outweigh the gains you get from doing the work in parallel. You may want to profile the application to determine whether it is really CPU-bound when running single-threaded. If it is not, you are better off staying single-threaded: your bottleneck is then I/O, which is not easy to parallelize.

A couple of other suggestions: you will see a performance hit from AsOrdered and TakeWhile, because both need to synchronize back with the originating thread. Consider profiling without the ordering requirement and see whether that gives you any performance improvement.

Also, consider using a ConcurrentDictionary instead of the standard generic Dictionary to avoid concurrency problems when adding items. A rough sketch of both suggestions (dropping AsOrdered and switching to a ConcurrentDictionary) follows; it assumes the products, dates and prices lists already built in the question's methods and is not a drop-in replacement.
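
// Requires: using System.Collections.Concurrent; using System.Linq;
var excelDataRefined = new ConcurrentDictionary<string, IDictionary<string, decimal>>();

// Process the column pairs in parallel; without AsOrdered, PLINQ does not
// have to merge results back into source order.
Enumerable.Range(0, dates.Count)
    .AsParallel()
    .ForAll(i =>
    {
        decimal num;
        IEnumerable<object> pricelist = prices[i]
            .Select(value => value ?? 0m)
            .Where(content => decimal.TryParse(content.ToString(), out num));

        Dictionary<string, decimal> dict = dates[i]
            .Zip(pricelist, (k, v) => new { k, v })
            .ToDictionary(x => (string)x.k, x => decimal.Parse(x.v.ToString()));

        // TryAdd is thread-safe; a duplicate product name is simply skipped.
        excelDataRefined.TryAdd(products[i], dict);
    });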

Answer 2 (score: 0)

In Benchmark_AsParallel and Benchmark_ForEach you perform 2n operations; in Benchmark_foreach only n.
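
A minimal sketch of what a single pass could look like, assuming the headers, dates, prices and excelDataRaw variables from the question; the parity of the column index decides whether a field is a product/date column or a price column, so each header is visited once instead of twice.

// One pass over the headers instead of two separate ForAll/ForEach passes.
for (int i = 0; i < headers.Count; i++)
{
    string field = headers[i];
    if (i % 2 == 0)
    {
        // Product/date column: keep non-null cells.
        dates.Add(excelDataRaw.Select(col => col[field])
                              .Where(row => row != null)
                              .ToList());
    }
    else
    {
        // Price column: take as many rows as the preceding date column has.
        prices.Add(excelDataRaw.Select(col => col[field] ?? 0m)
                               .Take(dates.Last().Count())
                               .ToList());
    }
}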