I'm using Python and boto3 to process some S3 files on Spark, and when downloading a file I hit an odd error: the 's3' resource does not exist.
Since boto3 isn't installed on every cluster node, I packaged boto3 and its dependencies into a zip, submitted the job to the Spark cluster with --py-files, and then this exception occurred.
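For context, here is a minimal sketch of the failing call, reconstructed from the traceback below (the bucket/key mapping and local path are my assumptions, not the original code):

import boto3

def download_file_from_s3(app_name, file_path):
    # Fails on the executors when boto3 is shipped via --py-files:
    # ResourceNotExistsError: The 's3' resource does not exist.
    s3 = boto3.resource('s3')
    local_path = '/tmp/' + file_path.split('/')[-1]           # illustrative
    s3.Bucket(app_name).download_file(file_path, local_path)  # bucket name assumed
    return local_path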
Py4JJavaErrorTraceback (most recent call last)
<ipython-input-3-8147865bf49c> in <module>()
2
3
----> 4 extractor.extract(paths)
/usr/local/lib/python2.7/site-packages/extract-1.0-py2.7.egg/extract.pyc in extract(self, target_files_path)
52 try:
53 sc = self.get_spark_context()
---> 54 self._extract_file(sc, target_files_path)
55 finally:
56 if sc:
/usr/local/lib/python2.7/site-packages/extract-1.0-py2.7.egg/extract.pyc in _extract_file(self, sc, target_files_path)
109 def _extract_file(self, sc, target_files_path):
110 file_rdd = sc.parallelize(target_files_path, len(target_files_path))
--> 111 result_rdd = file_rdd.map(lambda file_path: self.process(file_path, self.func)).collect()
112 result_rdd.saveAsTextFile(self.result_path)
113
/usr/lib/spark/python/pyspark/rdd.py in collect(self)
769 """
770 with SCCallSiteSync(self.context) as css:
--> 771 port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
772 return list(_load_from_socket(port, self._jrdd_deserializer))
773
/usr/lib/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
811 answer = self.gateway_client.send_command(command)
812 return_value = get_return_value(
--> 813 answer, self.gateway_client, self.target_id, self.name)
814
815 for temp_arg in temp_args:
/usr/lib/spark/python/lib/py4j-0.9-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
306 raise Py4JJavaError(
307 "An error occurred while calling {0}{1}{2}.\n".
--> 308 format(target_id, ".", name), value)
309 else:
310 raise Py4JError(
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 0.0 failed 4 times, most recent failure: Lost task 2.3 in stage 0.0 (TID 15, ip-172-20-219-210.corp.hpicloud.net): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/worker.py", line 111, in main
process()
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/worker.py", line 106, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
vs = list(itertools.islice(iterator, batch))
File "/usr/local/lib/python2.7/site-packages/extract-1.0-py2.7.egg/extract.py", line 111, in <lambda>
File "./lib.zip/extract.py", line 115, in process
local_path = download_file_from_s3(self.app_name, file_path)
File "./lib.zip/extract.py", line 22, in download_file_from_s3
s3 = boto3.resource('s3')
File "./lib.zip/boto3/__init__.py", line 100, in resource
return _get_default_session().resource(*args, **kwargs)
File "./lib.zip/boto3/session.py", line 347, in resource
has_low_level_client)
ResourceNotExistsError: The 's3' resource does not exist.
The available resources are:
-
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
at org.apache.spark.scheduler.Task.run(Task.scala:89)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1929)
at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
at org.apache.spark.rdd.RDD.collect(RDD.scala:926)
at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:405)
at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
at py4j.Gateway.invoke(Gateway.java:259)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:209)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/worker.py", line 111, in main
process()
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/worker.py", line 106, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/mnt/yarn/usercache/chqiang/appcache/application_1521024688288_67008/container_1521024688288_67008_01_000002/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
vs = list(itertools.islice(iterator, batch))
File "/usr/local/lib/python2.7/site-packages/extract-1.0-py2.7.egg/extract.py", line 111, in <lambda>
File "./lib.zip/extract.py", line 115, in process
local_path = download_file_from_s3(self.app_name, file_path)
File "./lib.zip/extract.py", line 22, in download_file_from_s3
s3 = boto3.resource('s3')
File "./lib.zip/boto3/__init__.py", line 100, in resource
return _get_default_session().resource(*args, **kwargs)
File "./lib.zip/boto3/session.py", line 347, in resource
has_low_level_client)
ResourceNotExistsError: The 's3' resource does not exist.
The available resources are:
-
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
at org.apache.spark.scheduler.Task.run(Task.scala:89)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
... 1 more
Can anyone help me? Thanks!
Answer 0 (score: 0)
I packaged the dependency packages as whl files instead of a zip, added them all to the --py-files parameter (e.g. a.whl,b.whl,c.whl), changed s3 = boto3.resource('s3') in the code to {{ 1}}, and it worked.
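For reference, a common form of this kind of fix (an assumption on my part, not necessarily the exact code behind the placeholder above) is to build the resource from an explicit session instead of boto3's implicit module-level default session:

import boto3

def get_s3_resource():
    # Hypothetical variant of the fix: one explicit Session per worker
    # process, rather than the shared default session.
    session = boto3.session.Session()
    return session.resource('s3')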
Answer 1 (score: 0)
I ran into the same error today. I tried to work around it by using a client instead of a resource, but that just produced a different error:
botocore.exceptions.DataNotFoundError: Unable to load data for: endpoints
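A sketch of that attempt:

import boto3

# Low-level client instead of the resource API; from a zipped boto3
# this still fails with the DataNotFoundError shown above.
s3 = boto3.client('s3')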
I searched around and concluded that the boto3 package cannot simply be zipped, because packaging it this way loses several .json data files. endpoints.json is one of them: it exists in your local botocore/data directory, but the zipped copy does not have it.
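A quick check of where that file lives locally (a sketch; it just shows that botocore resolves its data files from a real directory on disk):

import os
import botocore

# botocore loads endpoints.json and friends from its data directory;
# this prints whether the file exists at the on-disk location that a
# zip archive does not provide.
data_dir = os.path.join(os.path.dirname(botocore.__file__), 'data')
print(os.path.exists(os.path.join(data_dir, 'endpoints.json')))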
The solution is to install boto3 during bootstrap. You can create a bootstrap script and supply it when the cluster is created (there is an option for this in the AWS EMR console). That installs boto3 on the master node and on all the worker nodes.
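A minimal bootstrap script along these lines (a sketch; the exact pip invocation depends on the EMR release and Python version):

#!/bin/bash
# Install boto3 on every node (master and workers) at cluster creation.
sudo pip install boto3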
You can also refer to this answer: boto3 cannot create client on pyspark worker?