stateDF
  .withWatermark("t", "1 seconds")
  .groupBy(window($"t", "1 minutes", "1 minutes"), $"hid")
  .agg(collect_list("id"))
  .writeStream
  .outputMode("append")
  .format("console")
  .trigger(ProcessingTime("1 minutes"))
  .start()
  .awaitTermination()
I run into this problem as soon as I add `collect_list`; the same aggregation works fine through Spark Core. ERROR:
java.lang.RuntimeException: Collect cannot be used in partial aggregations.
    at scala.sys.package$.error(package.scala:27)
    at ...
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Answer 0 (score: 0)
Through my own exploration I found two ways to work around this problem.

Method 1: Modify the Spark source code along the lines of SPARK-1893. I do not recommend this.

Method 2: Write your own user-defined aggregate function (UDAF). It is a bit cumbersome, but it works. My code is below; corrections are welcome!
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._

// Collects distinct "id_state" strings into an array, as a replacement for collect_list.
class CollectList extends UserDefinedAggregateFunction {

  // Two string input columns: id and state.
  override def inputSchema: StructType =
    StructType(StructField("id", StringType, nullable = true) ::
               StructField("state", StringType, nullable = true) :: Nil)

  // The aggregation buffer holds the accumulated array of strings.
  override def bufferSchema: StructType =
    StructType(StructField("ids", ArrayType(StringType, containsNull = true), nullable = true) :: Nil)

  override def dataType: ArrayType = ArrayType(StringType, containsNull = true)

  override def deterministic: Boolean = false

  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = null
  }

  // Append "id_state" for each input row.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val s = input.getString(0) + "_" + input.getString(1)
    if (buffer.get(0) == null) {
      buffer(0) = Array(s)
    } else {
      buffer(0) = buffer.getAs[Seq[String]](0) :+ s
    }
  }

  // Merge two partial buffers, keeping only distinct values.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val right = buffer2.getAs[Seq[String]](0)
    if (right != null) {
      if (buffer1.getAs[Seq[String]](0) == null) {
        buffer1(0) = right.distinct
      } else {
        buffer1(0) = (buffer1.getAs[Seq[String]](0) ++ right).distinct
      }
    }
  }

  override def evaluate(buffer: Row): Any = buffer.getAs[Seq[String]](0)
}
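
For completeness, here is a minimal sketch (my own, not part of the original answer) of how this UDAF might be plugged back into the streaming query from the question. It assumes stateDF also has a "state" column, since the UDAF concatenates id and state; the output column name "ids" and the use of Trigger.ProcessingTime are illustrative choices.

import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.Trigger

// "spark" is assumed to be the active SparkSession; its implicits enable the $ column syntax.
import spark.implicits._

// Instantiate the UDAF defined above and call it in place of collect_list.
val collectList = new CollectList

stateDF
  .withWatermark("t", "1 seconds")
  .groupBy(window($"t", "1 minutes", "1 minutes"), $"hid")
  .agg(collectList($"id", $"state").as("ids"))
  .writeStream
  .outputMode("append")
  .format("console")
  .trigger(Trigger.ProcessingTime("1 minutes"))
  .start()
  .awaitTermination()

A UserDefinedAggregateFunction instance can be applied directly to columns as above, or registered with spark.udf.register if you prefer to call it from SQL.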