An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe

Posted: 2016-05-02 14:31:13

Tags: python-3.x apache-spark pyspark spark-dataframe

I am new to Spark, and I am getting an error while converting a .csv file to a DataFrame. I am using the pyspark_csv module for the conversion, but it fails. Here is the stack trace of the error; any suggestions for resolving it would be appreciated.

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-16-67fe725a8e27> in <module>()
----> 1 data_df = pycsv.csvToDataFrame(sqlCtx, data_body, sep=",", columns=data_header.split('\t')).cache()

/usr/spark-1.5.0/python/pyspark_csv.py in csvToDataFrame(sqlCtx, rdd, columns, sep, parseDate)
     51         rdd_sql = rdd_array.zipWithIndex().filter(
     52             lambda r_i: r_i[1] > 0).keys()
---> 53     column_types = evaluateType(rdd_sql, parseDate)
     54 
     55     def toSqlRow(row):

/usr/spark-1.5.0/python/pyspark_csv.py in evaluateType(rdd_sql, parseDate)
    177 def evaluateType(rdd_sql, parseDate):
    178     if parseDate:
--> 179         return rdd_sql.map(getRowType).reduce(reduceTypes)
    180     else:
    181         return rdd_sql.map(getRowTypeNoDate).reduce(reduceTypes)

/usr/spark-1.5.0/python/pyspark/rdd.py in reduce(self, f)
    797             yield reduce(f, iterator, initial)
    798 
--> 799         vals = self.mapPartitions(func).collect()
    800         if vals:
    801             return reduce(f, vals)

/usr/spark-1.5.0/python/pyspark/rdd.py in collect(self)
    771         """
    772         with SCCallSiteSync(self.context) as css:
--> 773             port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    774         return list(_load_from_socket(port, self._jrdd_deserializer))
    775 

/usr/spark-1.5.0/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
    536         answer = self.gateway_client.send_command(command)
    537         return_value = get_return_value(answer, self.gateway_client,
--> 538                 self.target_id, self.name)
    539 
    540         for temp_arg in temp_args:

/usr/spark-1.5.0/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    298                 raise Py4JJavaError(
    299                     'An error occurred while calling {0}{1}{2}.\n'.
--> 300                     format(target_id, '.', name), value)
    301             else:
    302                 raise Py4JError(

Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 10.0 failed 1 times, most recent failure: Lost task 0.0 in stage 10.0 (TID 20, localhost): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
    process()
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/usr/spark-1.5.0/python/pyspark/rdd.py", line 797, in func
    yield reduce(f, iterator, initial)
  File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
    b_type = b[col]
IndexError: list index out of range

	at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:138)
	at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:179)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:97)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
	at org.apache.spark.scheduler.Task.run(Task.scala:88)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1280)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1268)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1267)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1267)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
	at scala.Option.foreach(Option.scala:236)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:697)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1493)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1455)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1444)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:567)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1813)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1826)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1839)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1910)
	at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:905)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:306)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:904)
	at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:373)
	at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
	at py4j.Gateway.invoke(Gateway.java:259)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:207)
	at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
    process()
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/spark-1.5.0/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/usr/spark-1.5.0/python/pyspark/rdd.py", line 797, in func
    yield reduce(f, iterator, initial)
  File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
    b_type = b[col]
IndexError: list index out of range

	at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:138)
	at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:179)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:97)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
	at org.apache.spark.scheduler.Task.run(Task.scala:88)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	... 1 more

Here is my code. The last statement, which converts the CSV to a DataFrame, is the one that raises the error.

import findspark
findspark.init()
findspark.find()
import pyspark

sc = pyspark.SparkContext(appName="myAppName")
sqlCtx = pyspark.SQLContext

# csv to dataframe

sc.addPyFile('/usr/spark-1.5.0/python/pyspark_csv.py')

import pyspark_csv as pycsv


def skip_header(idx, iterator):
    # Drop the first line of the first partition (the CSV header).
    if idx == 0:
        next(iterator)
    return iterator

data = sc.textFile('gdeltdata/20160427.CSV')

data_header = data.first()

data_body = data.mapPartitionsWithIndex(skip_header)

data_df = pycsv.csvToDataFrame(sqlCtx, data_body, sep=",", columns=data_header.split('\t'))

2 Answers:

Answer 0 (score: 0):

I can't actually comment yet, but without seeing any code I would have to guess that you are trying to reference an index that does not exist in a string. That would be the same as doing the following:

string = 'hello'
new_char = string[6]

This tries to access the 7th letter of a 5-letter string, which raises the following error:

IndexError: string index out of range

Since I can't see the code that causes the error, that is all I can offer about your problem.
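
For reference, the traceback above actually reports a list index error (the failing line is b_type = b[col] in pyspark_csv). A minimal sketch of the same failure with a list; the values here are purely illustrative, not taken from pyspark_csv itself:

# Illustrative only: a short row yields a short list of inferred types,
# while a longer row supplies a column index past its end.
b = ['int', 'float']   # types inferred for a 2-column row
col = 2                # column index taken from a row with more columns
b_type = b[col]        # IndexError: list index out of range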

Answer 1 (score: 0):

Your CSV file appears to be malformed.

The actual error is a little deeper in the trace:

  File "/tmp/spark-d85b88bf-e4a4-46b8-8b51-eaf0f03e48ab/userFiles-40f9eb34-4efa-4ffb-aaf5-ebcb24a4ecb9/pyspark_csv.py", line 160, in reduceTypes
    b_type = b[col]
IndexError: list index out of range

This indicates that one of the rows does not have the expected number of columns (it could be a blank line at the end of the file).
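
One quick way to check is to count the fields per row and look for rows that disagree with the header. Below is a minimal diagnostic sketch reusing data_header and data_body from the question; it assumes the file is tab-delimited, which is worth verifying, because the question splits the header on '\t' but passes sep="," to csvToDataFrame, and that mismatch alone would give the header and the data rows different column counts:

# Diagnostic sketch (assumes the question's data_header / data_body and a
# tab-delimited file; change the delimiter if the data really uses commas).
delimiter = '\t'
expected = len(data_header.split(delimiter))

bad_rows = (data_body
            .map(lambda line: line.split(delimiter))
            .filter(lambda fields: len(fields) != expected))

print(bad_rows.count())   # how many malformed rows there are
print(bad_rows.take(5))   # inspect a few of them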