java.lang.NullPointerException with a custom accumulator

Asked: 2017-09-07 14:52:10

Tags: java apache-spark serialization

I have created a custom accumulator, shown below. It is serializable:

public class ABCAccumulator extends AccumulatorV2<String, Set> implements Serializable {

    Set<String> set = new HashSet();

    @Override
    public void add(String v) {
        set.add(v);
    }
}

First, is there a Spark API for creating an accumulator over any collection (Set, Map, etc.)? I know CollectionAccumulator covers the List case, as sketched below.
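For reference, this is roughly what I mean by the List case (a minimal sketch; the accumulator name, sample data, and app name are made up for illustration):

import java.util.Arrays;
import java.util.List;

import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.CollectionAccumulator;

public class CollectionAccumulatorExample {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext("local[*]", "collection-acc-example");

        // Built-in accumulator that collects values into a java.util.List
        CollectionAccumulator<String> names = jsc.sc().collectionAccumulator("names");

        jsc.parallelize(Arrays.asList("a", "b", "a"))
           .foreach(names::add);

        // Contains "a", "b", "a" in some order once the action has finished
        List<String> collected = names.value();
        System.out.println(collected);

        jsc.stop();
    }
}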

Second, I use this accumulator to collect all the values of an RDD, like this:

ABCAccumulator acc = new ABCAccumulator();
sparkContext.register(acc);

rdd.foreach(record -> {
    acc.add(record.getName());
});

But when I run the code, I get this exception:

 org.apache.spark.SparkException: Task not serializable
        at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:298)
        at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:288)
        at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:108)
        at org.apache.spark.SparkContext.clean(SparkContext.scala:2287)
        at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:917)
        at org.apache.spark.rdd.RDD$$anonfun$foreach$1.apply(RDD.scala:916)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
        at org.apache.spark.rdd.RDD.foreach(RDD.scala:916)
        at org.apache.spark.api.java.JavaRDDLike$class.foreach(JavaRDDLike.scala:351)
        at org.apache.spark.api.java.AbstractJavaRDDLike.foreach(JavaRDDLike.scala:45)
        at com.def.ghi.jkl.mno.ActualClass.lambda$main$ed7564e9$1(ActualClass.java:154)
        at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
        at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
        at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
        at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:416)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:50)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
        at scala.util.Try$.apply(Try.scala:192)
        at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:257)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
        at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:256)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.NullPointerException
        at org.apache.spark.util.AccumulatorV2.copyAndReset(AccumulatorV2.scala:129)
        at org.apache.spark.util.AccumulatorV2.writeReplace(AccumulatorV2.scala:167)
        at sun.reflect.GeneratedMethodAccessor9.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at java.io.ObjectStreamClass.invokeWriteReplace(ObjectStreamClass.java:1118)
        at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1136)
        at java.io.ObjectOutputStream.writeArray(ObjectOutputStream.java:1378)
        at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1174)
        at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
        at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
        at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
        at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
        at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
        at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
        at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
        at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
        at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:348)
        at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:43)
        at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
        at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:295)

Please help.

2 answers:

Answer 0 (score: 2)

I think it fails because ABCAccumulator does not properly implement all of the required methods.

Try something like the following, where every abstract method of AccumulatorV2 is implemented in ABCAccumulator.
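Here is a minimal sketch of such a complete ABCAccumulator (illustrative; only the class name and the add logic come from the question). The stack trace also suggests where the NullPointerException comes from: serialization goes through writeReplace, which calls copyAndReset(), which calls copy() and then reset() on the result, so a stubbed copy() that returns null fails at exactly AccumulatorV2.scala:129.

import java.util.HashSet;
import java.util.Set;

import org.apache.spark.util.AccumulatorV2;

// Every abstract method of AccumulatorV2 is implemented. AccumulatorV2 itself
// already extends Serializable, so no extra "implements Serializable" is needed.
public class ABCAccumulator extends AccumulatorV2<String, Set<String>> {

    private final Set<String> set = new HashSet<>();

    @Override
    public boolean isZero() {
        return set.isEmpty();
    }

    @Override
    public AccumulatorV2<String, Set<String>> copy() {
        // Return a real (non-null) copy; copyAndReset() calls reset() on it
        ABCAccumulator newAcc = new ABCAccumulator();
        newAcc.set.addAll(this.set);
        return newAcc;
    }

    @Override
    public void reset() {
        set.clear();
    }

    @Override
    public void add(String v) {
        set.add(v);
    }

    @Override
    public void merge(AccumulatorV2<String, Set<String>> other) {
        set.addAll(other.value());
    }

    @Override
    public Set<String> value() {
        return set;
    }
}

With all of the methods implemented, registering the accumulator with sparkContext.register(acc) and calling acc.add(...) inside foreach as in the question should work.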

Answer 1 (score: 0)

You can create your own custom accumulator by extending AccumulatorV2 and supplying appropriate input and output types. I can share an example from my own use case.

As for your problem, I think you are using an object inside an action (i.e. inside the forEach). Make sure that object implements Serializable, and that its dependencies are serializable as well; that should resolve your error. A small illustration follows.
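For example (a hypothetical NameNormalizer helper, not from your code): everything the foreach lambda captures is serialized and shipped to the executors, so each captured object has to be serializable.

import java.io.Serializable;

// Hypothetical helper captured by the foreach closure. Everything the closure
// references (this helper, the accumulator, any fields of the enclosing class)
// is serialized and sent to the executors; if one of them is not Serializable,
// Spark throws "Task not serializable" on the driver.
public class NameNormalizer implements Serializable {
    public String normalize(String name) {
        return name == null ? "" : name.trim().toLowerCase();
    }
}

So something like rdd.foreach(record -> acc.add(normalizer.normalize(record.getName()))) only works if the captured normalizer (and everything else the lambda touches) is serializable.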

Here is an example of a custom accumulator. Say you have the following user-defined, Map-typed metric that you want available after the job has run:

case class SampleOutputStat(stats: Map[String, Long] = Map()) extends Serializable {

  // Logic for adding a record to the metric during a transformation
  def add(record: String): SampleOutputStat = {
    val existingCount = stats.getOrElse(record, 0L)
    this.copy(stats = stats + (record -> (existingCount + 1L)))
  }

  // Logic for merging two metric instances during an action
  def merge(other: SampleOutputStat): SampleOutputStat = {
    this.copy(stats = mergeMaps(this.stats, other.stats))
  }

  private def mergeMaps(l: Map[String, Long], r: Map[String, Long]): Map[String, Long] = {
    (l.keySet union r.keySet).map { key =>
      key -> (l.getOrElse(key, 0L) + r.getOrElse(key, 0L))
    }.toMap
  }
}

Now define your custom accumulator class as below. It takes records of type String as input and produces a SampleOutputStat as output, which tracks the derived metrics you care about:

// Accumulator class
class CustomAccumulator(var validationAccumulators: SampleOutputStat)
  extends AccumulatorV2[String, SampleOutputStat] {

  def reset(): Unit = {
    validationAccumulators = new SampleOutputStat()
  }

  def add(input: String): Unit = {
    validationAccumulators = validationAccumulators.add(input)
  }

  def value: SampleOutputStat = {
    validationAccumulators
  }

  def isZero: Boolean = {
    validationAccumulators.stats.isEmpty
  }

  def copy(): CustomAccumulator = {
    new CustomAccumulator(validationAccumulators)
  }

  def merge(other: AccumulatorV2[String, SampleOutputStat]): Unit = {
    validationAccumulators = validationAccumulators.merge(other.value)
  }

}