Kafka ConsumerRecord not serializable with Spark Streaming

Time: 2017-07-21 09:05:45

Tags: apache-kafka spark-streaming kafka-consumer-api

I am trying to run a Kafka-Spark Streaming script on a cluster of machines.

I have a Kafka producer that sends JSON messages on two different topics.
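For reference, the producer side is shaped roughly like the sketch below (the broker address, topic names and JSON payloads are simplified placeholders, not the exact producer code):

//simplified producer sketch (placeholder broker, topics and payloads)
import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

object ProducerSketch {
    def main(args: Array[String]) {
        val props = new Properties()
        props.put("bootstrap.servers", "0.0.0.0:9092")
        props.put("key.serializer", classOf[StringSerializer].getCanonicalName)
        props.put("value.serializer", classOf[StringSerializer].getCanonicalName)

        val producer = new KafkaProducer[String, String](props)

        //each source writes its JSON message to its own topic
        val jsonS1 = """{"sensor_name": "s1", "data": {"measurement": 18.0}}"""
        val jsonS2 = """{"sensor_name": "s2", "data": {"measurement": 7.5}}"""
        producer.send(new ProducerRecord[String, String]("topics1", jsonS1))
        producer.send(new ProducerRecord[String, String]("topics2", jsonS2))

        producer.flush()
        producer.close()
    }
}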

The code of my Spark script is:

package com.unimi.lucaf

//spark imports
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream

//kafka imports
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe

//json library imports
import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization
import org.json4s.native.Serialization.{read, write}

object App {

  def main(args: Array[String]) {

    // add setMaster() with information about the master node url (spark://0.0.0.0:7077) or the local (local[n])
    val sparkConf = new SparkConf().setAppName("SparkScript").setMaster("spark://0.0.0.0:7077")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    case class Thema(name: String, metadata: JObject)
    case class Tempo(unit: String, count: Double, metadata: JObject)
    case class Spatio(unit: String, metadata: JObject)
    case class Stt(spatial: Spatio, temporal: Tempo, thematic: Thema)
    case class Location(latitude: Double, longitude: Double, name: String)
    val kafkaParams = Map[String, Object](
        "bootstrap.servers" -> "0.0.0.0:9092",
        "key.deserializer" -> classOf[StringDeserializer].getCanonicalName,
        "value.deserializer" -> classOf[StringDeserializer].getCanonicalName,
        "group.id" -> "test_luca",
        "auto.offset.reset" -> "latest",
        "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    //Source s1
    case class Datas1(location : Location, timestamp : String, measurement : Double, accuracy : Double)
    case class Sensors1(sensor_name: String, start_date: String, end_date: String, data_schema: Array[String], data: Datas1, stt: Stt)
    val topics_s1 = Array("topics1")
    val stream_s1 = KafkaUtils.createDirectStream[String, String](ssc, PreferConsistent, Subscribe[String, String](topics_s1, kafkaParams))
    val s1 = stream_s1.map(record => {
        implicit val formats = DefaultFormats
        parse(record.value).extract[Sensors1]
    })

    //Source s2
    case class Datas2(location : Location, timestamp : String, measurement : Double, otherm : Double)
    case class Sensors2(sensor_name: String, start_date: String, end_date: String, data_schema: Array[String], data: Datas2, stt: Stt)
    val topics_s2 = Array("topics2")
    val stream_s2 = KafkaUtils.createDirectStream[String, String](ssc, PreferConsistent, Subscribe[String, String](topics_s2, kafkaParams))
    val s2 = stream_s2.map(record => {
        implicit val formats = DefaultFormats
        parse(record.value).extract[Sensors2]
    })

    //Join j1 rates: 10 seconds
    val j1s1 = s1.map(x => (x.data.timestamp+" "+x.data.location.latitude+" "+x.data.location.longitude, (x))).window(Seconds(10), Seconds(10))
    val j1s2 = s2.map(x => (x.data.timestamp+" "+x.data.location.latitude+" "+x.data.location.longitude, (x))).window(Seconds(10), Seconds(10))
    val j1pre = j1s1.join(j1s2)
    case class Dataj1(location : Location, s1_measurement : Double, s1_accuracy : Double, s2_measurement : Double, s2_otherm : Double, timestamp : String)
    case class Sensorj1(sensor_name: String, start_date: String, end_date: String, data_schema: Array[String], data: Dataj1, stt: Stt)
    //merge each matched (s1, s2) pair into a single joined record
    val j1 = j1pre.map { r =>
        val (r1, r2) = r._2
        new Sensorj1(
            "j1",
            r1.start_date,
            r1.end_date,
            r1.data_schema,
            new Dataj1(
                new Location(r1.data.location.latitude, r1.data.location.longitude, r1.data.location.name),
                r1.data.measurement,
                r1.data.accuracy,
                r2.data.measurement,
                r2.data.otherm,
                r2.data.timestamp),
            new Stt(
                new Spatio(r1.stt.spatial.unit, r1.stt.spatial.metadata),
                new Tempo(r1.stt.temporal.unit, r1.stt.temporal.count, r1.stt.temporal.metadata),
                new Thema(r1.stt.thematic.name, r1.stt.thematic.metadata)))
    }

    //collect the joined records on the driver and print their sensor names
    j1.foreachRDD { rdd =>
        val collected = rdd.map(record => (record.sensor_name, record.sensor_name)).collect()
        for (c <- collected) {
            println(c)
        }
    }
    //Execution of the script
    ssc.start()
    ssc.awaitTermination()
  }
}

It runs fine locally, but when I try to run it on the cluster of machines I get this error:

Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0.0 in stage 8.0 (TID 8) had a not serializable result: org.apache.kafka.clients.consumer.ConsumerRecord
Serialization stack:
- object not serializable (class: org.apache.kafka.clients.consumer.ConsumerRecord, value: ConsumerRecord(topic = topics1, partition = 0, offset = 3868403, CreateTime = -1, checksum = 1010027992, serialized key size = -1, serialized value size = 476, key = null, value = {"sensor_name" : "s1", "start_date" : "2017-01-01T00:00:00", "end_date" : "20501231T235900", "data_schema" : ["val1","val1"], "data" : {"location" :{"latitude": 49.1234, "longitude": 12.8765, "name": "null"}, "timestamp": "2017-07-21T10:38:07 627", "measurement": 18, "unit" : "celsius", "accuracy" : 3.0}, "stt": {"spatial": {"unit": "point", "metadata": {}}, "temporal": {"unit": "minutes", "count": 13, "metadata": {}}, "thematic": {"name": "temperature", "metadata": {}}}}))
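
If I read the stack correctly, Spark is trying to ship the raw org.apache.kafka.clients.consumer.ConsumerRecord objects coming out of createDirectStream between nodes, and ConsumerRecord does not implement java.io.Serializable. One direction I have seen mentioned for this exact exception (shown here only as an untested sketch, not something that has fixed the job for me) is to switch the job to Kryo serialization and register the ConsumerRecord class:

//untested sketch: enable Kryo and register ConsumerRecord
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf

val sparkConf = new SparkConf()
    .setAppName("SparkScript")
    .setMaster("spark://0.0.0.0:7077")
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .registerKryoClasses(Array(classOf[ConsumerRecord[String, String]]))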

I have already tried to find some solutions (for example, using foreach on the RDD), but they do not seem to work. Do you have any suggestions?
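
For completeness, the foreach variant I mentioned above looked roughly like this (printing on the executors instead of collecting the joined records back to the driver), and it did not seem to help:

//variant of the output stage: print on the executors instead of collecting on the driver
j1.foreachRDD { rdd =>
    rdd.foreach(record => println(record.sensor_name))
}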

Thanks, Luca

0 answers:

No answers