Spark job throws NullPointerException when reading Avro data

Asked: 2019-11-08 17:51:36

Tags: apache-spark avro spark-avro

My Spark job throws a NullPointerException while reading data. I am reading Avro data and joining it with another dataset, but I get this error:

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2039)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2027)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2026)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2026)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:966)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:966)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:966)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2260)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2209)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2198)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:777)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:365)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3384)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset$$anonfun$53.apply(Dataset.scala:3365)
    at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3364)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2759)
    at org.apache.spark.sql.Dataset.getRows(Dataset.scala:255)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:292)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:748)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:725)
    at Test$.main(Test.scala:112)
    at Test.main(Test.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
    at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
    at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
    at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
    at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

Caused by: java.lang.NullPointerException
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.supportedCategories(AvroObjectInspectorGenerator.java:142)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:91)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:121)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspector(AvroObjectInspectorGenerator.java:83)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.<init>(AvroObjectInspectorGenerator.java:56)
    at org.apache.hadoop.hive.serde2.avro.AvroSerDe.initialize(AvroSerDe.java:124)
    at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$5$$anonfun$10.apply(TableReader.scala:258)
    at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$5$$anonfun$10.apply(TableReader.scala:246)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:801)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:801)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.UnionRDD.compute(UnionRDD.scala:105)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
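The failing snippet itself is not included in the post. Judging from the stack trace (Spark's HadoopTableReader initializing Hive's AvroSerDe, with the show() call at Test.scala:112), the job reads an Avro-backed Hive table and joins it with another table. A minimal sketch of that pattern, with hypothetical database, table, and column names:

import org.apache.spark.sql.SparkSession

object Test {
  def main(args: Array[String]): Unit = {
    // enableHiveSupport matches the Hive code path (HadoopTableReader/AvroSerDe)
    // seen in the stack trace above.
    val spark = SparkSession.builder()
      .appName("avro-join")
      .enableHiveSupport()
      .getOrCreate()

    // Hypothetical names -- the original post does not show the actual snippet.
    val avroData = spark.table("db.avro_backed_table") // Hive table stored as Avro
    val other    = spark.table("db.other_table")

    // The NPE surfaces only when the plan is executed, e.g. by show().
    avroData.join(other, Seq("id")).show()
  }
}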

Here are my sbt dependencies:

libraryDependencies ++=  Seq(
  "org.apache.spark" %% "spark-core" % sparkVersion % "provided" exclude("org.apache.avro", "avro"),
  "org.apache.spark" % "spark-sql_2.11" % "2.3.0"
)

libraryDependencies += "org.apache.spark" %% "spark-avro" % "2.4.4"
libraryDependencies += "org.apache.avro" % "avro" % "1.7.4"

The same snippet works fine in spark-shell.
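Since the same code behaves differently under spark-shell and spark-submit, one plausible suspect (my assumption, not stated in the post) is a classpath difference: the shell uses the cluster's own Spark and Avro jars, while the build above pins avro 1.7.4. A small Scala check, runnable in both environments, to see which Avro jar each one actually loads:

// Prints the jar that the Avro Schema class was loaded from in this JVM;
// run in spark-shell and in the job and compare the two paths.
println(classOf[org.apache.avro.Schema].getProtectionDomain.getCodeSource.getLocation)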

0 Answers:

There are no answers yet.