Structured Streaming with Spark: each query takes more time

Time: 2018-06-25 08:52:16

Tags: scala apache-spark apache-spark-sql spark-structured-streaming

I am using Spark 2.3.0, Scala 2.11.8 and Kafka, and I am trying to write every message coming from Kafka into Parquet files with Structured Streaming. The problem is that with my implementation the total time spent per message keeps increasing with every query (see the attached Spark Stages image). I would like to know why this happens. I have tried all the trigger options I could think of (Continuous, 0 seconds, 1 second, 10 seconds, 10 minutes, etc.) and I always get the same behavior. My code has the following structure:

import org.apache.spark.sql.functions._
import org.apache.spark.sql.{Column, SparkSession}
import com.name.proto.ProtoMessages
import java.io._
import java.text.{DateFormat, SimpleDateFormat}
import java.util.Date
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.streaming.OutputMode

object StructuredStreaming {

  // Parse the protobuf payload and expose the fields of interest as strings.
  def message_proto(value: Array[Byte]): Map[String, String] = {

    try {
      val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
      val impression_proto = ProtoMessages.TrackingRequest.parseFrom(value)

      val json = Map(
       "id_req" -> (impression_proto.getIdReq().toString),
       "ts_imp_request" -> (impression_proto.getTsRequest().toString),
       "is_after" -> (impression_proto.getIsAfter().toString),
       "type" -> (impression_proto.getType().toString)
      )    
      return json

    }catch{
      case e:Exception=>
        val pw = new PrintWriter(new File("/home/data/log.log" ))
        pw.write(e.toString)
        pw.close()

        return Map("error" -> "error")       
    }
  }

  def main(args: Array[String]){

    val proto_impressions_udf = udf(message_proto _)
    val spark = SparkSession.builder.appName("Structured Streaming ").getOrCreate()
    //fetchOffset.numRetries, fetchOffset.retryIntervalMs
    val stream = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", "ip:9092")
      .option("subscribe", "ssp.impressions")
      .option("startingOffsets", "latest")
      .option("max.poll.records", "1000000")
      .option("auto.commit.interval.ms", "100000")
      .option("session.timeout.ms", "10000")
      .option("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
      .option("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
      .option("failOnDataLoss", "false")
      .option("latestFirst", "true")
      .load()

    try{
      val query = stream.select(col("value").cast("string"))
        .select(proto_impressions_udf(col("value")) as "value_udf")
        .select(col("value_udf")("id_req").as("id_req"), col("value_udf")("is_after").as("is_after"),
          date_format(col("value_udf")("ts_request"), "yyyy").as("date").as("year"),
          date_format(col("value_udf")("ts_request"), "MM").as("date").as("month"),
          date_format(col("value_udf")("ts_request"), "dd").as("date").as("day"),
          date_format(col("value_udf")("ts_request"), "HH").as("date").as("hour"))
      val query2 = query.writeStream.format("parquet")
                        .option("checkpointLocation", "/home/data/impressions/checkpoint")
                        .option("path", "/home/data/impressions")
                        .outputMode(OutputMode.Append())
                        .partitionBy("year", "month", "day", "hour")
                        .trigger(Trigger.ProcessingTime("1 seconds"))
                        .start()

      query2.awaitTermination()
    }catch{    
      case e:Exception=>
        val pw = new PrintWriter(new File("/home/data/log.log" ))
        pw.write(e.toString)
        pw.close()    
    }    
  }
}

I have attached other images from the Spark UI:

Jobs, Environment, Executors, Thread Dump, SQL Query Plan
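
For reference, the per-batch durations shown in the Spark UI can also be captured programmatically. Below is a minimal sketch (the object name is mine and the printed fields are just examples) that registers a StreamingQueryListener and prints the batch id, input row count and duration breakdown for every completed micro-batch, which makes it easy to confirm whether the durations really grow from one batch to the next:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener.{QueryProgressEvent, QueryStartedEvent, QueryTerminatedEvent}

object BatchDurationLogger {
  // Call once after the SparkSession is created, before starting the query.
  def register(spark: SparkSession): Unit = {
    spark.streams.addListener(new StreamingQueryListener {
      override def onQueryStarted(event: QueryStartedEvent): Unit = ()
      override def onQueryTerminated(event: QueryTerminatedEvent): Unit = ()
      override def onQueryProgress(event: QueryProgressEvent): Unit = {
        val p = event.progress
        // durationMs breaks the batch down into phases such as getBatch and addBatch
        println(s"batch=${p.batchId} rows=${p.numInputRows} durationMs=${p.durationMs}")
      }
    })
  }
}

Calling BatchDurationLogger.register(spark) right after creating the SparkSession is enough; the streaming query itself does not need to change.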

1 Answer:

Answer 0 (score: 0):

Your problem is related to the batches: you need to define a good processing time for each batch, and that depends on the processing power of your cluster. Also, the time it takes to resolve each batch depends on whether you receive all the fields without null values, because if you receive many null fields, processing that batch takes less time.
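
As a rough sketch of what "defining a good time for each batch" can look like in code (the 100000-record cap and the 30-second interval are arbitrary placeholders rather than recommendations; the topic name and paths are reused from the question): the Kafka source option maxOffsetsPerTrigger bounds how many records each micro-batch reads, and the processing-time trigger can then be sized so that one batch reliably finishes before the next one is due:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.{OutputMode, Trigger}

object TunedStructuredStreaming {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("Tuned Structured Streaming").getOrCreate()

    val stream = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", "ip:9092")
      .option("subscribe", "ssp.impressions")
      .option("startingOffsets", "latest")
      // Upper bound on the number of records read per micro-batch,
      // so the amount of work per batch stays roughly constant.
      .option("maxOffsetsPerTrigger", "100000")
      .load()

    val query = stream.selectExpr("CAST(value AS STRING) AS value")
      .writeStream.format("parquet")
      .option("checkpointLocation", "/home/data/impressions/checkpoint")
      .option("path", "/home/data/impressions")
      .outputMode(OutputMode.Append())
      // Interval sized so a capped batch finishes well within it on the given cluster.
      .trigger(Trigger.ProcessingTime("30 seconds"))
      .start()

    query.awaitTermination()
  }
}

With the batch size capped like this, the listener sketch shown earlier makes it easy to verify that the chosen interval leaves headroom between batches.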