OSError: Invalid argument when converting a Spark SQL DataFrame to a pandas DataFrame in PySpark

Asked: 2018-08-16 03:57:27

Tags: pandas pyspark

I loaded a CSV file with the following code:

from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext(master='local[1]')
sq = SQLContext(sc)  # 'sq' was used but never defined in the original snippet
df = sq.read.csv(file_path, header='true', inferSchema='true')  # file_path: path to the CSV

However, when I try to convert this Spark DataFrame to a pandas DataFrame with the following code:

pdf = df.toPandas()

I get the following error:

OSError                                   Traceback (most recent call last)
<ipython-input-27-cf3578af3a8d> in <module>()
----> 1 a = df.toPandas()

D:\softwares\anaconda\lib\site-packages\pyspark\sql\dataframe.py in toPandas(self)
   1964                 raise RuntimeError("%s\n%s" % (_exception_message(e), msg))
   1965         else:
-> 1966             pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
   1967
   1968             dtype = {}

D:\softwares\anaconda\lib\site-packages\pyspark\sql\dataframe.py in collect(self)
    465         with SCCallSiteSync(self._sc) as css:
    466             port = self._jdf.collectToPython()
--> 467         return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
    468
    469     @ignore_unicode_prefix

D:\softwares\anaconda\lib\site-packages\pyspark\serializers.py in load_stream(self, stream)
    143         while True:
    144             try:
--> 145                 yield self._read_with_length(stream)
    146             except EOFError:
    147                 return

D:\softwares\anaconda\lib\site-packages\pyspark\serializers.py in _read_with_length(self, stream)
    168         if len(obj) < length:
    169             raise EOFError
--> 170         return self.loads(obj)
    171
    172     def dumps(self, obj):

D:\softwares\anaconda\lib\site-packages\pyspark\serializers.py in loads(self, obj, encoding)
    557     if sys.version >= '3':
    558         def loads(self, obj, encoding="bytes"):
--> 559             return pickle.loads(obj, encoding=encoding)
    560     else:
    561         def loads(self, obj, encoding=None):

D:\softwares\anaconda\lib\site-packages\pyspark\sql\types.py in <lambda>(*a)
   1426 # This is used to unpickle a Row from JVM
   1427 def _create_row_inbound_converter(dataType):
-> 1428     return lambda *a: dataType.fromInternal(a)
   1429
   1430

D:\softwares\anaconda\lib\site-packages\pyspark\sql\types.py in fromInternal(self, obj)
    628             # Only calling fromInternal function for fields that need conversion
    629             values = [f.fromInternal(v) if c else v
--> 630                       for f, v, c in zip(self.fields, obj, self._needConversion)]
    631         else:
    632             values = obj

D:\softwares\anaconda\lib\site-packages\pyspark\sql\types.py in <listcomp>(.0)
    628             # Only calling fromInternal function for fields that need conversion
    629             values = [f.fromInternal(v) if c else v
--> 630                       for f, v, c in zip(self.fields, obj, self._needConversion)]
    631         else:
    632             values = obj

D:\softwares\anaconda\lib\site-packages\pyspark\sql\types.py in fromInternal(self, obj)
    440
    441     def fromInternal(self, obj):
--> 442         return self.dataType.fromInternal(obj)
    443
    444     def typeName(self):

D:\softwares\anaconda\lib\site-packages\pyspark\sql\types.py in fromInternal(self, ts)
    198         if ts is not None:
    199             # using int to avoid precision loss in float
--> 200             return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
    201
    202

OSError: [Errno 22] Invalid argument
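
For context, the final frame of the traceback fails inside datetime.datetime.fromtimestamp. On Windows, this call raises OSError: [Errno 22] Invalid argument for timestamps outside the range the C runtime accepts, notably negative values (dates before 1970-01-01). Below is a minimal sketch that reproduces the same error on Windows; the pre-1970 timestamp is an assumption, since the post does not show the CSV contents.

import datetime

# Hypothetical out-of-range value: 1900-01-01 00:00:00 UTC as a Unix timestamp.
# The assumption is that an inferred timestamp column in the CSV holds such a date.
ts = -2208988800
try:
    # On Windows this raises OSError: [Errno 22] Invalid argument;
    # on Linux/macOS the same call normally succeeds.
    print(datetime.datetime.fromtimestamp(ts))
except OSError as e:
    print("Reproduced:", e)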

Can someone help me resolve this error?

0 Answers:

No answers yet.