ImportError: No module named numpy on Windows

Posted: 2020-02-26 16:07:13

Tags: python pyspark jupyter-notebook

Hi, I'm new to PySpark (I only started learning it a week ago), so I'm asking for help with this error:

ImportError: No module named numpy

Could any kind soul help me figure out why numpy cannot be found? I have already tried the following: uninstalling numpy and reinstalling it from the Anaconda prompt run as administrator, checking python_home in my environment variables, and restarting my Jupyter notebook kernel.

    from pyspark.mllib.regression import LabeledPoint

    def parse_line(l):
        try:
            return l.split(",")
        except:
            print("error in processing {0}".format(l))

    data = sc.textFile('YearPredictionMSD.txt').map(lambda x : parse_line(x)).toDF()
    data_label = data.rdd.map(lambda x: LabeledPoint(x[0], x[1:]))
    data_train = data_label.zipWithIndex().filter(lambda x: x[1] < 463715)
    data_test = data_label.zipWithIndex().filter(lambda x: x[1] >= 463715)
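
To narrow down where the mismatch is, a quick check (a minimal sketch reusing the existing `sc`) is to print which interpreter the driver and the executors are actually running; if the executor path is not the Anaconda Python where numpy lives, the import fails only inside the workers:

    import sys

    # Interpreter running the notebook/driver
    print(sys.executable)

    # Interpreter the executors launch for their Python workers
    print(sc.parallelize([0]).map(lambda _: sys.executable).collect())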



    ---------------------------------------------------------------------------
    Py4JJavaError                             Traceback (most recent call last)
    <ipython-input-4-ed224fb17ae0> in <module>
    ----> 1 data_train = data_label.zipWithIndex().filter(lambda x: x[1] < 463715)
          2 
          3 data_test = data_label.zipWithIndex().filter(lambda x: x[1] >= 463715)

    C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\rdd.py in zipWithIndex(self)
       2244         starts = [0]
       2245         if self.getNumPartitions() > 1:
    -> 2246             nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
       2247             for i in range(len(nums) - 1):
       2248                 starts.append(starts[-1] + nums[i])

    C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\rdd.py in collect(self)
        887         """
        888         with SCCallSiteSync(self.context) as css:
    --> 889             sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        890         return list(_load_from_socket(sock_info, self._jrdd_deserializer))
        891 

    C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\py4j-0.10.8.1-src.zip\py4j\java_gateway.py in __call__(self, *args)
       1284         answer = self.gateway_client.send_command(command)
       1285         return_value = get_return_value(
    -> 1286             answer, self.gateway_client, self.target_id, self.name)
       1287 
       1288         for temp_arg in temp_args:

    C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\sql\utils.py in deco(*a, **kw)
         96     def deco(*a, **kw):
         97         try:
    ---> 98             return f(*a, **kw)
         99         except py4j.protocol.Py4JJavaError as e:
        100             converted = convert_exception(e.java_exception)

    C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\py4j-0.10.8.1-src.zip\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
        326                 raise Py4JJavaError(
        327                     "An error occurred while calling {0}{1}{2}.\n".
    --> 328                     format(target_id, ".", name), value)
        329             else:
        330                 raise Py4JError(

    Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
    : org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 1.0 failed 1 times, most recent failure: Lost task 2.0 in stage 1.0 (TID 3, DESKTOP-MRGDUK2, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
      File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\worker.py", line 579, in main
      File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\worker.py", line 71, in read_command
      File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\serializers.py", line 172, in _read_with_length
        return self.loads(obj)
      File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\serializers.py", line 700, in loads
        return pickle.loads(obj, encoding=encoding)
      File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\mllib\__init__.py", line 28, in <module>
        import numpy
    ModuleNotFoundError: No module named 'numpy'

1 Answer:

Answer 0 (score: 0)

Run the following command in a terminal.

For Python 2:

    pip install numpy

For Python 3:

    pip3 install numpy
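
If numpy is already installed but the error still appears, the Spark executors are probably launching a different Python than the one numpy was installed into. A minimal sketch of pinning the interpreter before the SparkContext is created (the Anaconda path below is an assumption; adjust it to your install):

    import os
    import sys

    # Point PySpark's driver and worker processes at the Python that has numpy.
    # Hypothetical path - replace with your actual Anaconda python.exe.
    os.environ["PYSPARK_PYTHON"] = r"C:\Anaconda3\python.exe"
    os.environ["PYSPARK_DRIVER_PYTHON"] = r"C:\Anaconda3\python.exe"

    # Sanity check: numpy should import in this interpreter.
    import numpy
    print(sys.executable, numpy.__version__)

These environment variables must be set before the SparkContext is created (or set system-wide); workers that are already running keep using the old interpreter.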