I want to normalize a dataframe in pyspark by group. The solution suggested here does not help, because I want to transform every column in the dataframe. The code I use on a pandas df in Python is the following:

df_norm = (X_df
           .groupby('group')
           .transform(lambda x: (x - x.min()) / (x.max() - x.min()))
           .fillna(0))

How can I do this in pyspark with a df or an RDD?

Example input:

columns = ['group', 'sensor1', 'sensor2', 'sensor3']
vals = [
    ('a', 0.8, 0.02, 100),
    ('a', 0.5, 0.1, 200),
    ('a', 1, 0.5, 50),
    ('a', 0, 0.8, 30),
    ('b', 10, 1, 0),
    ('b', 20, 2, 3),
    ('b', 5, 4, 1),
]

Desired output: the same table with each sensor column scaled to [0, 1] within its group (the result tables in the answers below show exactly this).
Answer 0 (score: 1)
I ended up doing it this way:

from pyspark.sql import functions as F
from pyspark.sql.window import Window

w = Window.partitionBy('group')
cols_to_normalize = ['sensor1', 'sensor2', 'sensor3']  # every column except the group key
for c in cols_to_normalize:
    df = (df.withColumn('mini', F.min(c).over(w))
            .withColumn('maxi', F.max(c).over(w))
            .withColumn(c, (F.col(c) - F.col('mini')) / (F.col('maxi') - F.col('mini')))
            .drop('mini')
            .drop('maxi'))
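One difference from the pandas version in the question: the pandas code ends with .fillna(0), so a column that is constant within a group becomes 0 rather than null (in Spark, max == min makes the division produce null). A minimal sketch of the same guard in the window approach, assuming the sensor column names from the example:

from pyspark.sql import functions as F
from pyspark.sql.window import Window

w = Window.partitionBy('group')
for c in ['sensor1', 'sensor2', 'sensor3']:  # assumed: the sensor columns from the example
    df = (df.withColumn('mini', F.min(c).over(w))
            .withColumn('maxi', F.max(c).over(w))
            # mirror the pandas .fillna(0): a column constant within its group maps to 0.0
            .withColumn(c, F.when(F.col('maxi') == F.col('mini'), F.lit(0.0))
                            .otherwise((F.col(c) - F.col('mini'))
                                       / (F.col('maxi') - F.col('mini'))))
            .drop('mini', 'maxi'))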
Answer 1 (score: 0)
I am using Spark 2.3.0. You can do the following:
import pandas as pd
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
from pyspark.sql.functions import pandas_udf, PandasUDFType

# the grouped-map function will return rows matching this schema
schema = StructType([
    StructField("group", StringType()),
    StructField("sensor1", DoubleType()),
    StructField("sensor2", DoubleType()),
    StructField("sensor3", DoubleType()),
])

@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def func(df):
    # you don't need this cast if the sensor columns are already float
    df.iloc[:, 1:] = df.iloc[:, 1:].astype(float)
    # select the columns to normalize (everything except the group key)
    cols = df.columns.difference(['group'])
    # min-max normalize within the group
    result = df.groupby('group')[cols].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
    return pd.concat([df['group'], result], axis=1)

# apply the function to each group
df.groupby('group').apply(func).show()
+-----+------------------+-------------------+-------------------+
|group| sensor1| sensor2| sensor3|
+-----+------------------+-------------------+-------------------+
| b|0.3333333333333333| 0.0| 0.0|
| b| 1.0| 0.3333333333333333| 1.0|
| b| 0.0| 1.0| 0.3333333333333333|
| a| 0.8| 0.0| 0.4117647058823529|
| a| 0.5|0.10256410256410256| 1.0|
| a| 1.0| 0.6153846153846153|0.11764705882352941|
| a| 0.0| 1.0| 0.0|
+-----+------------------+-------------------+-------------------+
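On Spark 3.x, PandasUDFType.GROUPED_MAP is deprecated in favor of GroupedData.applyInPandas. A sketch of the same logic in that style, reusing the `schema` defined above; since the function receives one group's rows at a time, no inner groupby is needed:

def normalize(pdf):
    # applyInPandas calls this once per group, with that group's rows as a pandas DataFrame
    sensors = pdf.columns.difference(['group'])
    pdf[sensors] = (pdf[sensors] - pdf[sensors].min()) / (pdf[sensors].max() - pdf[sensors].min())
    return pdf

# `schema` is the StructType defined in the answer above
df.groupby('group').applyInPandas(normalize, schema=schema).show()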
Answer 2 (score: 0)
from pyspark.sql.functions import col, min, max
from pyspark.sql.window import Window

vals = [('a', 0.8, 0.02, 100), ('a', 0.5, 0.1, 200), ('a', 1.0, 0.5, 50), ('a', 0.0, 0.8, 30),
        ('b', 10.0, 1.0, 0), ('b', 20.0, 2.0, 3), ('b', 5.0, 4.0, 1)]
df = sqlContext.createDataFrame(vals, ['group', 'sensor1', 'sensor2', 'sensor3'])
df.show()
+-----+-------+-------+-------+
|group|sensor1|sensor2|sensor3|
+-----+-------+-------+-------+
| a| 0.8| 0.02| 100|
| a| 0.5| 0.1| 200|
| a| 1.0| 0.5| 50|
| a| 0.0| 0.8| 30|
| b| 10.0| 1.0| 0|
| b| 20.0| 2.0| 3|
| b| 5.0| 4.0| 1|
+-----+-------+-------+-------+
w = Window().partitionBy('group')
df = (df.withColumn('min_sensor1', min(col('sensor1')).over(w))
        .withColumn('max_sensor1', max(col('sensor1')).over(w))
        .withColumn('min_sensor2', min(col('sensor2')).over(w))
        .withColumn('max_sensor2', max(col('sensor2')).over(w))
        .withColumn('min_sensor3', min(col('sensor3')).over(w))
        .withColumn('max_sensor3', max(col('sensor3')).over(w))
        .withColumn('sensor1', (col('sensor1') - col('min_sensor1')) / (col('max_sensor1') - col('min_sensor1')))
        .withColumn('sensor2', (col('sensor2') - col('min_sensor2')) / (col('max_sensor2') - col('min_sensor2')))
        .withColumn('sensor3', (col('sensor3') - col('min_sensor3')) / (col('max_sensor3') - col('min_sensor3')))
        .drop('min_sensor1', 'max_sensor1', 'min_sensor2', 'max_sensor2', 'min_sensor3', 'max_sensor3'))
df.show()
+-----+------------------+-------------------+-------------------+
|group| sensor1| sensor2| sensor3|
+-----+------------------+-------------------+-------------------+
| b|0.3333333333333333| 0.0| 0.0|
| b| 1.0| 0.3333333333333333| 1.0|
| b| 0.0| 1.0| 0.3333333333333333|
| a| 0.8| 0.0| 0.4117647058823529|
| a| 0.5|0.10256410256410256| 1.0|
| a| 1.0| 0.6153846153846153|0.11764705882352941|
| a| 0.0| 1.0| 0.0|
+-----+------------------+-------------------+-------------------+
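The per-column withColumn chain above grows quickly with the number of sensor columns. A compact variant, a sketch assuming 'group' is the only key column, computes all the normalized columns in a single select with a list comprehension (min/max are aliased to avoid shadowing the Python builtins):

from pyspark.sql.functions import col, min as min_, max as max_
from pyspark.sql.window import Window

w = Window.partitionBy('group')
sensor_cols = [c for c in df.columns if c != 'group']
df_norm = df.select(
    'group',
    *[((col(c) - min_(c).over(w)) / (max_(c).over(w) - min_(c).over(w))).alias(c)
      for c in sensor_cols]
)
df_norm.show()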