aioboto3 speedup not as expected

Asked: 2018-03-30 20:46:17

Tags: boto3 aiohttp aio

I'm trying out the aioboto3 lib, which looks very promising for speeding up certain tasks. For example, I need to find the tags of every S3 object under a particular bucket and prefix. Unfortunately, the speedup is not what I was hoping for.

With 1000 objects, the async version takes roughly half the time of the sync one. With 8000 objects, it takes about the same time! This is running on a c3.8xlarge EC2 instance.

Code:

import asyncio
import aioboto3
from boto3.dynamodb.conditions import Key
import boto3
import logging

import time

# Placeholder: the original snippet assumes `bucket` is defined elsewhere; adjust as needed.
bucket = 'my-bucket'

def dbg(*args):
    print(args[0] % args[1:])

def avg(l):
    return sum(l) / len(l)

def get_versions(count):
    s3cli = boto3.client('s3')

    r = s3cli.list_object_versions(Bucket=bucket)
    l = r['Versions']
    while True:
        if not r['IsTruncated'] or len(l) >= count:
            return l[:count]
        r = s3cli.list_object_versions(Bucket=bucket,KeyMarker=r['NextKeyMarker'],VersionIdMarker=r['NextVersionIdMarker'])
        l.extend(r['Versions'])


def try_s3_sync(versions):

    s3cli = boto3.client('s3')

    t = time.time()
    rtags = []
    for ver in versions:
        rtag = s3cli.get_object_tagging(Bucket=bucket,Key=ver['Key'],VersionId=ver['VersionId'])
        rtags.append(rtag)

    elapsed = time.time() - t

    dbg("sync elapsed <%s>",elapsed)
    return elapsed


async def a_try_s3(versions):

    async with aioboto3.client('s3') as s3cli:

        t = time.time()
        futures = [s3cli.get_object_tagging(Bucket=bucket, Key=ver['Key'], VersionId=ver['VersionId'])
                   for ver in versions]
        # asyncio.wait wraps the coroutines in Tasks and returns (done, pending) sets,
        # so `rtags` here holds finished Tasks; their payloads still need .result().
        rtags, other = await asyncio.wait(futures)

        elapsed = time.time() - t

        dbg("async elapsed <%s>",elapsed)
        return elapsed


def try_s3_async(versions):
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(a_try_s3(versions))


# -------------------------------------------


if __name__ == '__main__':

    for num in (1000,8000):
        versions = get_versions(num)
        dbg("len(versions) <%s>",len(versions))

        tries = 3
        dbg('avg for sync: %s',avg(list(try_s3_sync(versions) for _ in range(tries))))
        dbg('avg for async: %s',avg(list(try_s3_async(versions) for _ in range(tries))))

Output:

len(versions) <1000>
sync elapsed <19.383010864257812>
sync elapsed <20.18708372116089>
sync elapsed <20.515722513198853>
avg for sync: 20.028605699539185
async elapsed <13.05319333076477>
async elapsed <7.40950345993042>
async elapsed <9.881770372390747>
avg for async: 10.114822387695312
len(versions) <8000>
sync elapsed <168.69372606277466>
sync elapsed <158.15257668495178>
sync elapsed <167.32361602783203>
avg for sync: 164.7233062585195
async elapsed <158.08434414863586>
async elapsed <165.93541312217712>
async elapsed <165.63341856002808>
avg for async: 163.21772527694702

Any suggestions are appreciated.

1 Answer:

Answer 0 (score: 0)

You can't just dump thousands of tasks onto the event loop at once, or you will slow the whole program down. You need to do something like the following:

from asyncpool import AsyncPool
import aiobotocore.session
import aiobotocore.config

async def a_try_s3(versions):
    max_parallel_tasks = 100
    session = aiobotocore.session.AioSession()
    config = aiobotocore.config.AioConfig(max_pool_connections=max_parallel_tasks)
    logger = logging.getLogger("ExamplePool")
    loop = asyncio.get_event_loop()
    rtags = []

    async with session.create_client('s3', config=config) as s3cli:

        # Worker coroutine: fetch the tags for one object version and collect the result.
        async def get_object_tagging(*, Key, VersionId):
            rtag = await s3cli.get_object_tagging(Bucket=bucket, Key=Key, VersionId=VersionId)
            rtags.append(rtag)

        t = time.time()

        async with AsyncPool(loop, num_workers=max_parallel_tasks, name="WorkPool",
                             logger=logger,
                             worker_co=get_object_tagging,
                             log_every_n=100, raise_on_join=True) as pool:

            for ver in versions:
                await pool.push(Key=ver['Key'], VersionId=ver['VersionId'])

    # Exiting the pool's context manager waits for all queued work to finish.
    elapsed = time.time() - t

    dbg("async elapsed <%s>", elapsed)
    return elapsed
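
If you'd rather not pull in the asyncpool dependency, the same worker-pool idea can be sketched with just asyncio.Queue and a fixed set of worker tasks, so only a bounded number of requests is in flight at once. This is a rough sketch, not part of the original answer: it reuses the question's bucket and dbg, the function name a_try_s3_workers and the num_workers default are made up for illustration, and per-request error handling is omitted.

import asyncio
import aiobotocore.session
import aiobotocore.config

async def a_try_s3_workers(versions, num_workers=100):
    session = aiobotocore.session.AioSession()
    config = aiobotocore.config.AioConfig(max_pool_connections=num_workers)
    queue = asyncio.Queue()
    rtags = []

    async with session.create_client('s3', config=config) as s3cli:

        async def worker():
            # Each worker pulls one version at a time off the queue,
            # so at most `num_workers` requests run concurrently.
            while True:
                ver = await queue.get()
                try:
                    rtag = await s3cli.get_object_tagging(
                        Bucket=bucket, Key=ver['Key'], VersionId=ver['VersionId'])
                    rtags.append(rtag)
                finally:
                    queue.task_done()

        t = time.time()
        workers = [asyncio.ensure_future(worker()) for _ in range(num_workers)]
        for ver in versions:
            await queue.put(ver)
        await queue.join()              # wait until every queued item has been processed
        elapsed = time.time() - t

        for w in workers:
            w.cancel()                  # the workers loop forever, so stop them once the queue drains
        await asyncio.gather(*workers, return_exceptions=True)

    dbg("async elapsed <%s>", elapsed)
    return elapsed

As the answer points out, the win comes from bounding concurrency rather than from creating more tasks: effective parallelism is capped by max_pool_connections, so raising the worker count far beyond it mostly just adds scheduling overhead.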