这是我尝试写出 CSV 文件的代码：使用 pandas 将 Spark 数据帧按组写入 CSV 文件。
import csv
import os

import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Sample input: (key, value1, value2) rows spanning two groups, "a" and "b".
_rows = [
    ("a", 1, 0),
    ("a", -1, 42),
    ("b", 3, -1),
    ("b", 10, -2),
]
df3 = spark.createDataFrame(_rows, ("key", "value1", "value2"))
from pyspark.sql.types import *
# Output schema of the grouped-map UDF: one summary row per key.
_field_specs = [
    ("key", StringType()),
    ("avg_value1", DoubleType()),
    ("avg_value2", DoubleType()),
    ("sum_avg", DoubleType()),
    ("sub_avg", DoubleType()),
    ("result", StringType()),
]
schema = StructType([StructField(name, dtype) for name, dtype in _field_specs])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def g(df):
    """Summarize one key-group: compute the means of value1/value2 and
    their sum and difference, dump the group's raw rows to a CSV file,
    and return a single summary row matching `schema`.

    NOTE: this runs on a Spark executor, so plain file APIs (pandas
    `to_csv`) write to the *worker's local filesystem*, not to DBFS.
    """
    gr = df['key'].iloc[0]
    x = df.value1.mean()
    y = df.value2.mean()
    # Reuse the already-computed means rather than recomputing them.
    w = x + y
    z = x - y
    # BUG FIX: writing to '/mnt/test...' goes to the executor's local
    # disk, which is why no CSV ever appeared in the mount. On
    # Databricks, a DBFS mount at /mnt/test is reachable from local
    # file APIs only through the /dbfs FUSE prefix — TODO confirm the
    # mount point exists on your workspace.
    fileName = '/dbfs/mnt/test' + gr + '.csv'
    df.to_csv(fileName, sep='\t')
    a = "Saved"
    return pd.DataFrame([[gr, x, y, w, z, a]])
# Run g once per distinct key; .show() triggers execution and prints the
# one summary row per group. (groupby(...).apply(udf) is the Spark 2.x-era
# grouped-map API.)
df3.groupby("key").apply(g).show()
输出:
+---+----------+----------+-------+-------+------+
|key|avg_value1|avg_value2|sum_avg|sub_avg|result|
+---+----------+----------+-------+-------+------+
| a| 0.0| 21.0| 21.0| -21.0| Saved|
| b| 6.5| -1.5| 5.0| 8.0| Saved|
+---+----------+----------+-------+-------+------+
但是未创建CSV文件。
任何建议将不胜感激。