Module 'apache_beam.io.filesystems' has no attribute 'Filesystems'

Date: 2019-03-15 09:12:43

Tags: python google-cloud-platform google-cloud-dataflow apache-beam apache-beam-io

I'm trying to read file metadata from Pub/Sub, and I can read the message successfully. After that, I'm trying to pass the bucket name and file name from the message so that I can open the GCS file and perform some operations on it, but I can't get it to work.

from __future__ import absolute_import
import argparse
import logging
from past.builtins import unicode
import json
#from google.cloud import language
#from google.cloud.language import enums
#from google.cloud.language import types
import apache_beam as beam
import apache_beam.transforms.window as window
import re
from builtins import object
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileSystem
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.examples.wordcount import WordExtractingDoFn
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.io.textio import ReadFromText, WriteToText
def run(argv=None):
  """Build and run the pipeline."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--output_topic', required=True,
      help=('Output PubSub topic of the form '
            '"projects/bakers-dev-230413/topics/outsub".'))
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument(
      '--input_topic',
      help=('Input PubSub topic of the form '
            '"projects/bakers-dev-230413/topics/testsub1".'))
  group.add_argument(
      '--input_subscription',
      help=('Input PubSub subscription of the form '
            '"projects/bakers-dev-230413/subscriptions/test_subscription".'))
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  pipeline_options.view_as(StandardOptions).streaming = True
  p = beam.Pipeline(options=pipeline_options)

  # Read from PubSub into a PCollection.
  if known_args.input_subscription:
    messages = (p
                | beam.io.ReadFromPubSub(
                    subscription=known_args.input_subscription)
                .with_output_types(bytes))
  else:
    messages = (p
                | beam.io.ReadFromPubSub(topic=known_args.input_topic)
                .with_output_types(bytes))
  def print_row(row):
    print(row)
  file_metadata_pcoll = (messages
                         | 'decode' >> beam.Map(lambda x: x.decode('utf-8')))
  lines = (file_metadata_pcoll
           | 'read_file' >> beam.FlatMap(
               lambda metadata: beam.io.filesystems.Filesystems.open(
                   'gs://%s/%s' % (metadata['bucket'], metadata['name'])))
           | "print" >> beam.Map(print_row))
  result = p.run()
  result.wait_until_finish()

if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()

I'm getting this error:

ERROR:root:Exception at bundle <apache_beam.runners.direct.bundle_factory._Bundle object at 0x7f96cd9d5d08>, due to an exception.
 Traceback (most recent call last):
  File "apache_beam/runners/common.py", line 727, in apache_beam.runners.common.DoFnRunner.process
  File "apache_beam/runners/common.py", line 419, in apache_beam.runners.common.SimpleInvoker.invoke_process
  File "/home/g9192gks/baker-template/pipeline/pubsub.py", line 79, in <lambda>
    lines = (file_metadata_pcoll | 'read_file' >>  beam.FlatMap(lambda metadata: beam.io.filesystems.Filesystems.open('gs://%s/%s' % (metadata['bucket'], metadata['name'])))
AttributeError: module 'apache_beam.io.filesystems' has no attribute 'Filesystems'

I've tried importing the module in several ways:

from apache_beam.io.filesystem import FileSystem
from apache_beam.io.filesystem import FileSystems
from apache_beam.io.filesystems import FileSystems

and I also changed

lines = (file_metadata_pcoll | 'read_file' >> beam.FlatMap(lambda metadata: beam.io.filesystems.Filesystems.open('gs://%s/%s' % (metadata['bucket'], metadata['name']))))

to

lines = (file_metadata_pcoll | 'read_file' >> beam.FlatMap(lambda metadata: beam.io.filesystem.Filesystem.open('gs://%s/%s' % (metadata['bucket'], metadata['name']))))
but no luck.

1 Answer:

Answer 0 (score: 0)

You want

from apache_beam.io.filesystems import FileSystems

请参见https://beam.apache.org/releases/pydoc/2.4.0/apache_beam.io.filesystems.html
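
A minimal sketch of how the read step could then look with that import, assuming each decoded Pub/Sub payload is a JSON string containing 'bucket' and 'name' fields (as in a GCS object notification); the read_gcs_file helper name is hypothetical:

import json

import apache_beam as beam
from apache_beam.io.filesystems import FileSystems  # plural module, plural class


def read_gcs_file(metadata_json):
    # The decoded payload is a JSON string, so parse it before indexing into it.
    metadata = json.loads(metadata_json)
    path = 'gs://%s/%s' % (metadata['bucket'], metadata['name'])
    # FileSystems.open returns a readable file-like object; iterating it
    # yields the file's lines as bytes.
    f = FileSystems.open(path)
    for line in f:
        yield line


lines = (file_metadata_pcoll
         | 'read_file' >> beam.FlatMap(read_gcs_file)
         | 'print' >> beam.Map(print_row))

Note that open is called on the FileSystems class (capital F and S) from the plural filesystems module; the singular filesystem module holds the abstract FileSystem base class, which is why the other import variants fail.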