Merging more than 32 files in Google Cloud Storage

Time: 2014-10-03 12:21:43

Tags: google-cloud-storage apache-spark google-compute-engine

I ran an Apache Spark script on Google Compute Engine that writes its output to Google Cloud Storage. My Cloud Storage folder now contains more than 300 part-00XXX files, and I would like to merge them.

I tried:

poiuytrez@spark-m:~$ gsutil compose gs://mybucket/data/* gs://mybucket/myfile.csv

But I got this error:

CommandException: "compose" called with too many component objects. Limit is 32.

Any ideas on how to merge all of these part files?

2 Answers:

Answer 0: (score: 5)

You can only compose 32 objects in a single request, but a composite object can be made up of as many as 1024 components. In particular, you can compose objects 0-31 into some object 0', objects 32-63 into 1', and so on; each of those composite objects can then be composed again in a single request (0', 1', ..., floor(300/32)').
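For illustration, here is a minimal Python sketch of that layered approach using the google-cloud-storage client; the bucket name and prefixes are taken from the question, and the intermediate object names are invented for the example.

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("mybucket")

# All part files written by Spark, e.g. data/part-00000, data/part-00001, ...
parts = list(bucket.list_blobs(prefix="data/part-"))

# First level: compose the parts in batches of up to 32 into intermediate objects.
intermediates = []
for i in range(0, len(parts), 32):
    tmp = bucket.blob(f"tmp/composed-{i // 32}")
    tmp.compose(parts[i:i + 32])
    intermediates.append(tmp)

# Second level: with ~300 parts there are only about 10 intermediates, well under
# the 32-per-request limit, so one more compose produces the final object.
final = bucket.blob("myfile.csv")
final.compose(intermediates)

# Clean up the intermediate objects.
for tmp in intermediates:
    tmp.delete()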

Answer 1: (score: 0)

This code composes the blobs under a source path into larger objects under `composed/`, writing out a new composed object each time the accumulated size reaches the configured MB limit (`merge_when_size_mb`).

import os
from uuid import uuid4
from itertools import count

from google.cloud import storage


def delete_directory(bucket, prefix):
    print(f"cleaning temporary blobs in {prefix}")
    for blob in bucket.list_blobs(prefix=prefix):
        blob.delete()


def _run_compose(bucket, blob_name, blobs_to_compose):
    composed_blob = bucket.blob(blob_name)
    composed_blob.compose(blobs_to_compose, timeout=600)
    return composed_blob


def create_composed_blob_name(dest_prefix, offset=0):
    return f"{dest_prefix}/composed-{offset}-{uuid4().hex[:4]}.jsonl"


def compose(
    client,
    bucket_name,
    source_prefix,
    dest_prefix,
    create_new_line_blob,
    merge_when_size_mb=250,
    print_every_n_blobs=100,
):
    """

    Args:
        client:
        bucket_name:
        source_prefix:
        dest_prefix:
        create_new_line_blob:
        merge_when_size: compose when you hit `n` MB.
        merge_when_size_mb:
        print_every_n_blobs:

    Returns:

    """

    merge_when_size = merge_when_size_mb * 1024 * 1024  # MB

    bucket = client.bucket(bucket_name)

    if create_new_line_blob:
        new_line_blob = bucket.blob("tmp/new_line.txt")
        new_line_blob.upload_from_string("\n")

    blobs_to_compose = []
    composed_blob_offset = count(0)
    running_size = 0
    i = 0

    f = open("/tmp/all_composed_blobs.txt", "w")

    for i, blob in enumerate(bucket.list_blobs(prefix=source_prefix), 1):
        f.write(f"{bucket_name}/{blob.name}\n")

        if i % print_every_n_blobs == 0:
            print(f"{i} blob processed.")

        blobs_to_compose.append(blob)
        running_size += blob.size

        if len(blobs_to_compose) == 31:
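            # stay under the 32-components-per-request compose limit: collapse
            # what has accumulated so far into one intermediate object and keep going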
            blob_name = create_composed_blob_name(
                "composed/tmp", next(composed_blob_offset)
            )
            composed_blob = _run_compose(bucket, blob_name, blobs_to_compose)
            blobs_to_compose = [composed_blob]

            # no counter reset needed here: the intermediate composite replaces
            # its components, and running_size already includes their sizes

        if create_new_line_blob:
            blobs_to_compose.append(new_line_blob)

        if running_size >= merge_when_size:
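            # enough data accumulated: write the final composed blob under dest_prefix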
            blob_name = create_composed_blob_name(
                dest_prefix, next(composed_blob_offset)
            )
            _run_compose(bucket, blob_name, blobs_to_compose)
            # refresh all counters
            blobs_to_compose = []
            running_size = 0

    print(f"Last processed blob is {i}.")

    # compose the remaining, if any.
    if len(blobs_to_compose) != 0:
        blob_name = create_composed_blob_name(dest_prefix, next(composed_blob_offset))
        _run_compose(bucket, blob_name, blobs_to_compose)

    # final operations: upload the list of composed blob names & delete the temporary directory
    delete_directory(bucket, prefix="composed/tmp")
    f.close()
    bucket.blob(
        f"composed/composed-files/{os.path.basename(f.name)}"
    ).upload_from_filename(f.name)


def run():
    client = storage.Client()
    bucket_name = "some_bucket"
    source_prefixes = ["some_prefix", "another_prefix"]
    for source_prefix in source_prefixes:
        compose(
            client,
            bucket_name,
            source_prefix,
            f"composed/{source_prefix}",
            create_new_line_blob=True,
            merge_when_size_mb=250,
        )


if __name__ == "__main__":
    run()