I have a PySpark DataFrame. Example:
ID | phone  | name <array> | age <array>
-----------------------------------------
12 | 827556 | ['AB','AA']  | ['CC']
-----------------------------------------
45 | 87346  | null         | ['DD']
-----------------------------------------
56 | 98356  | ['FF']       | null
-----------------------------------------
34 | 87345  | ['AA','BB']  | ['BB']
I want to concatenate the two arrays name and age. I did it like this:
import pyspark.sql.functions as F

df = df.withColumn("new_column", F.concat(df.name, df.age))
df = df.select("ID", "phone", "new_column")
But I'm missing some values: it seems that the concat function works on strings, not on arrays, and I also need the duplicates removed.

Expected result:

ID | phone  | new_column <array>
---------------------------------
12 | 827556 | ['AB','AA','CC']
---------------------------------
45 | 87346  | ['DD']
---------------------------------
56 | 98356  | ['FF']
---------------------------------
34 | 87345  | ['AA','BB']

Knowing that I'm using the concat function, how can I concatenate two arrays in PySpark?
Thank you.
Answer 0 (score: 2)

You can also use selectExpr (with one caveat on the result type, noted after the output below):
testdata = [(0, ['AB','AA'], ['CC']), (1, None, ['DD']), (2, ['FF'] ,None), (3, ['AA','BB'] , ['BB'])]
df = spark.createDataFrame(testdata, ['id', 'name', 'age'])
>>> df.show()
+---+--------+----+
| id|    name| age|
+---+--------+----+
|  0|[AB, AA]|[CC]|
|  1|    null|[DD]|
|  2|    [FF]|null|
|  3|[AA, BB]|[BB]|
+---+--------+----+
>>> df.selectExpr('''array(concat_ws(',',name,age)) as joined''').show()
+----------+
|    joined|
+----------+
|[AB,AA,CC]|
|      [DD]|
|      [FF]|
|[AA,BB,BB]|
+----------+
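The caveat: array(concat_ws(',', name, age)) yields a one-element array containing the comma-joined string, which is why the values above print with no spaces after the commas. If one element per value is needed, a small variation on the same idea (a sketch against the same test data; split and concat_ws are built-ins available before Spark 2.4):

# sketch: one array element per value, still without a UDF
df.selectExpr("split(concat_ws(',', name, age), ',') as joined").show()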
Answer 1 (score: 1)

This should help:
from pyspark.sql.functions import col, concat
testdata = [(0, ['a','b','d'], ['a2','b2','d2']), (1, ['c'], ['c2']), (2, ['d','e'],['d2','e2'])]
df = spark.createDataFrame(testdata, ['id', 'codes', 'codes2'])
df2 = df.withColumn("new_column",concat(col("codes"), col("codes2")))
After the concatenation, the result is:
+---+---------+------------+--------------------+
| id|    codes|      codes2|          new_column|
+---+---------+------------+--------------------+
|  0|[a, b, d]|[a2, b2, d2]|[a, b, d, a2, b2,...|
|  1|      [c]|        [c2]|             [c, c2]|
|  2|   [d, e]|    [d2, e2]|      [d, e, d2, e2]|
+---+---------+------------+--------------------+
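Two caveats worth noting: concat accepts array columns only on Spark 2.4+, and it returns null as soon as either input array is null (as with the name/age data in the question). A sketch that guards against nulls and also removes the duplicates the asker mentions, assuming Spark 2.4+ for array_distinct:

from pyspark.sql.functions import array, array_distinct, coalesce, col, concat

df2 = df.withColumn(
    "new_column",
    array_distinct(                            # drop duplicate values (Spark 2.4+)
        concat(
            coalesce(col("codes"), array()),   # treat a null array as empty
            coalesce(col("codes2"), array()),
        )
    ),
)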
Regards.
Answer 2 (score: 1)

For Spark < 2.4, we need a UDF to concatenate the arrays. Hope this helps.
from pyspark.sql import functions as F
from pyspark.sql.types import *
df = spark.createDataFrame([('a',['AA','AB'],['BC']),('b',None,['CB']),('c',['AB','BA'],None),('d',['AB','BB'],['BB'])],['c1','c2','c3'])
df.show()
+---+--------+----+
| c1|      c2|  c3|
+---+--------+----+
|  a|[AA, AB]|[BC]|
|  b|    null|[CB]|
|  c|[AB, BA]|null|
|  d|[AB, BB]|[BB]|
+---+--------+----+
## changing null to empty array
df = df.withColumn('c2',F.coalesce(df.c2,F.array())).withColumn('c3',F.coalesce(df.c3,F.array()))
df.show()
+---+--------+----+
| c1|      c2|  c3|
+---+--------+----+
|  a|[AA, AB]|[BC]|
|  b|      []|[CB]|
|  c|[AB, BA]|  []|
|  d|[AB, BB]|[BB]|
+---+--------+----+
## UDF to concat the columns and remove the duplicates
udf1 = F.udf(lambda x,y: list(dict.fromkeys(x+y)), ArrayType(StringType()))
df = df.withColumn('concatd',udf1(df.c2,df.c3))
df.show()
+---+--------+----+------------+
| c1|      c2|  c3|     concatd|
+---+--------+----+------------+
|  a|[AA, AB]|[BC]|[AA, AB, BC]|
|  b|      []|[CB]|        [CB]|
|  c|[AB, BA]|  []|    [AB, BA]|
|  d|[AB, BB]|[BB]|    [AB, BB]|
+---+--------+----+------------+
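A hedged aside on the UDF itself: dict.fromkeys preserves insertion order only from Python 3.7 on (CPython 3.6 as an implementation detail). If the workers run an older interpreter, collections.OrderedDict gives the same order-preserving dedup:

from collections import OrderedDict

# same UDF as above, but order-safe on older Python versions
udf1 = F.udf(lambda x, y: list(OrderedDict.fromkeys(x + y)), ArrayType(StringType()))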
Answer 3 (score: 1)

A Spark solution without a UDF, below (works on Spark < 2.4).
import pyspark.sql.functions as F
testdata = [(0, ['AB','AA'], ['CC']), (1, None, ['DD']), (2, ['FF'] ,None), (3, ['AA','BB'] , ['BB'])]
df = spark.createDataFrame(testdata, ['id', 'name', 'age'])
df.show()
+---+--------+----+
| id|    name| age|
+---+--------+----+
|  0|[AB, AA]|[CC]|
|  1|    null|[DD]|
|  2|    [FF]|null|
|  3|[AA, BB]|[BB]|
+---+--------+----+
# join each array into a comma-separated string (a null array becomes '')
df = df.withColumn('name', F.concat_ws(',', 'name'))
df = df.withColumn('age', F.concat_ws(',', 'age'))
# concatenate the two strings
df = df.withColumn("new_column", F.concat_ws(',', df.name, df.age))
# strip the leading/trailing comma left behind by an empty side
df = df.withColumn("new_column", F.regexp_replace(df.new_column, "^,", ''))
df = df.withColumn("new_column", F.regexp_replace(df.new_column, ",$", ''))
# split back into an array
df.withColumn("new_column", F.split(df.new_column, ",")).show(5, False)
+---+-----+---+------------+
|id |name |age|new_column  |
+---+-----+---+------------+
|0  |AB,AA|CC |[AB, AA, CC]|
|1  |     |DD |[DD]        |
|2  |FF   |   |[FF]        |
|3  |AA,BB|BB |[AA, BB, BB]|
+---+-----+---+------------+
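One edge case, as a hedged aside: a row where both arrays are null ends up with new_column == '', and F.split('', ',') yields [''] (a one-element array holding an empty string) rather than an empty array. If that matters, the final split line can be guarded, as a sketch:

# sketch: map the all-null case to a genuinely empty array
df.withColumn(
    "new_column",
    F.when(df.new_column == '', F.array().cast("array<string>"))
     .otherwise(F.split(df.new_column, ","))
).show(5, False)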