ClassNotFoundException spark-submit scala

Asked: 2016-05-10 05:48:14

Tags: scala apache-spark

Hi, I am trying to generate the output of the Salt Examples, but without using Docker as mentioned in their documentation. I found the Scala code that generates the output, Main.scala, and modified it into a convenient standalone version:

package BinExTest
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.Row

import software.uncharted.salt.core.projection.numeric._
import software.uncharted.salt.core.generation.request._
import software.uncharted.salt.core.generation.Series
import software.uncharted.salt.core.generation.TileGenerator
import software.uncharted.salt.core.generation.output.SeriesData
import software.uncharted.salt.core.analytic.numeric._

import java.io._

import scala.util.parsing.json.JSONObject

object Main {

  // Defines the tile size in both x and y bin dimensions
  val tileSize = 256

  // Defines the output layer name
  val layerName = "pickups"

  // Encodes the tile's Double bins as 64-bit long bit patterns and returns them as a little-endian byte array
  def createByteBuffer(tile: SeriesData[(Int, Int, Int), (Int, Int), Double, (Double, Double)]): Array[Byte] = {
    val byteArray = new Array[Byte](tileSize * tileSize * 8)
    var j = 0
    tile.bins.foreach(b => {
      val data = java.lang.Double.doubleToLongBits(b)
      for (i <- 0 to 7) {
        byteArray(j) = ((data >> (i * 8)) & 0xff).asInstanceOf[Byte]
        j += 1
      }
    })
    byteArray
  }

  def main(args: Array[String]): Unit = {

    val jarFile = "/home/kesava/Studies/BinExTest/BinExTest.jar"
    val inputPath = "/home/kesava/Downloads/taxi_micro.csv"
    val outputPath = "/home/kesava/SoftWares/salt/salt-examples/bin-example/Output"

    val conf = new SparkConf().setAppName("salt-bin-example").setJars(Array(jarFile))
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    sqlContext.read.format("com.databricks.spark.csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .load(s"file://$inputPath")
      .registerTempTable("taxi_micro")

    // Construct an RDD of Rows containing only the fields we need. Cache the result
    val input = sqlContext.sql("select pickup_lon, pickup_lat from taxi_micro")
      .rdd.cache()

    // Given an input row, return pickup longitude, latitude as a tuple
    val pickupExtractor = (r: Row) => {
      if (r.isNullAt(0) || r.isNullAt(1)) {
        None
      } else {
        Some((r.getDouble(0), r.getDouble(1)))
      }
    }

    // Tile Generator object, which houses the generation logic
    val gen = TileGenerator(sc)

    // Break levels into batches. Process several higher levels at once because the
    // number of tile outputs is quite low. Lower levels done individually due to high tile counts.
    val levelBatches = List(List(0, 1, 2, 3, 4, 5, 6, 7, 8), List(9, 10, 11), List(12), List(13), List(14))

    // Iterate over sets of levels to generate.
    val levelMeta = levelBatches.map(level => {

      println("------------------------------")
      println(s"Generating level $level")
      println("------------------------------")

      // Construct the definition of the tiling jobs: pickups
      val pickups = new Series((tileSize - 1, tileSize - 1),
        pickupExtractor,
        new MercatorProjection(level),
        (r: Row) => Some(1),
        CountAggregator,
        Some(MinMaxAggregator))

      // Create a request for all tiles on these levels, generate
      val request = new TileLevelRequest(level, (coord: (Int, Int, Int)) => coord._1)
      val rdd = gen.generate(input, pickups, request)

      // Translate RDD of Tiles to RDD of (coordinate,byte array), collect to master for serialization
      val output = rdd
        .map(s => pickups(s).get)
        .map(tile => {
          // Return tuples of tile coordinate, byte array
          (tile.coords, createByteBuffer(tile))
        })
        .collect()

      // Save byte files to local filesystem
      output.foreach(tile => {
        val coord = tile._1
        val byteArray = tile._2
        val limit = (1 << coord._1) - 1
        // Use standard TMS path structure and file naming
        val file = new File(s"$outputPath/$layerName/${coord._1}/${coord._2}/${limit - coord._3}.bins")
        file.getParentFile.mkdirs()
        val output = new FileOutputStream(file)
        output.write(byteArray)
        output.close()
      })

      // Create map from each level to min / max values.
      rdd
        .map(s => pickups(s).get)
        .map(t => (t.coords._1.toString, t.tileMeta.get))
        .reduceByKey((l, r) => {
          (Math.min(l._1, r._1), Math.max(l._2, r._2))
        })
        .mapValues(minMax => {
          JSONObject(Map(
            "min" -> minMax._1,
            "max" -> minMax._2
          ))
        })
        .collect()
        .toMap
    })

    // Flatten array of maps into a single map
    val levelInfoJSON = JSONObject(levelMeta.reduce(_ ++ _)).toString()
    // Save level metadata to filesystem
    val pw = new PrintWriter(s"$outputPath/$layerName/meta.json")
    pw.write(levelInfoJSON)
    pw.close()

  }
}

I created a separate folder for this Scala file, with another folder named lib inside it that holds the required jar files, and compiled it with scalac as follows:

scalac -cp "lib/salt.jar:lib/spark.jar" Main.scala

This ran successfully and generated the classes under the BinExTest folder.
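
Since spark-submit distributes jar files rather than loose class files, the generated classes can be packed into the BinExTest.jar referenced above using the JDK's jar tool (a minimal sketch, assuming the compiled classes live under BinExTest/):

jar cf BinExTest.jar BinExTest/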

Now, the project's build.gradle has the following lines, from which I figured out the command that generates the output dataset:

task run(overwrite: true, type: Exec, dependsOn: [assemble]) {
  executable = 'spark-submit'
  args = ["--class","software.uncharted.salt.examples.bin.Main","/opt/salt/build/libs/salt-bin-example-${version}.jar", "/opt/data/taxi_one_day.csv", "/opt/output"]
}

Seeing this, I put together the following command:

spark-submit --class BinExTest.Main lib/salt.jar

When I do this, I get the following error:

java.lang.ClassNotFoundException: BinExTest.Main
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:278)
    at org.apache.spark.util.Utils$.classForName(Utils.scala:174)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:689)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

Can somebody help me out here? I am completely new to this and have gotten this far just by exploring.

[Update 1]

Taking YoYo's suggestion,

spark-submit --class BinExTest.Main --jars "BinExTest.jar" "lib/salt.jar"

I got past the ClassNotFoundException, but it produced a new error, shown below:

Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 3.0 failed 1 times, most recent failure: Lost task 1.0 in stage 3.0 (TID 6, localhost): java.lang.NoSuchMethodError: scala.runtime.IntRef.create(I)Lscala/runtime/IntRef;
    at BinExTest.Main$.createByteBuffer(Main.scala:29)
    at BinExTest.Main$$anonfun$2$$anonfun$6.apply(Main.scala:101)
    at BinExTest.Main$$anonfun$2$$anonfun$6.apply(Main.scala:99)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)

Any idea what is going on?

[Update 2]

Building Spark from source with Scala 2.11 support solved my previous issue. However, I got a new error:

16/05/10 18:39:15 ERROR TaskSetManager: Task 0 in stage 2.0 failed 1 times; aborting job
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 2.0 failed 1 times, most recent failure: Lost task 0.0 in stage 2.0 (TID 3, localhost): java.lang.NoClassDefFoundError: scala/collection/GenTraversableOnce$class
    at software.uncharted.salt.core.util.SparseArray.<init>(SparseArray.scala:37)
    at software.uncharted.salt.core.util.SparseArray.<init>(SparseArray.scala:57)
    at software.uncharted.salt.core.generation.rdd.RDDSeriesWrapper.makeBins(RDDTileGenerator.scala:224)
    at software.uncharted.salt.core.generation.rdd.RDDTileGeneratorCombiner.createCombiner(RDDTileGenerator.scala:128)
    at software.uncharted.salt.core.generation.rdd.RDDTileGenerator$$anonfun$3.apply(RDDTileGenerator.scala:100)
    at software.uncharted.salt.core.generation.rdd.RDDTileGenerator$$anonfun$3.apply(RDDTileGenerator.scala:100)
    at org.apache.spark.util.collection.ExternalSorter$$anonfun$5.apply(ExternalSorter.scala:187)
    at org.apache.spark.util.collection.ExternalSorter$$anonfun$5.apply(ExternalSorter.scala:186)
    at org.apache.spark.util.collection.AppendOnlyMap.changeValue(AppendOnlyMap.scala:148)
    at org.apache.spark.util.collection.SizeTrackingAppendOnlyMap.changeValue(SizeTrackingAppendOnlyMap.scala:32)
    at org.apache.spark.util.collection.ExternalSorter.insertAll(ExternalSorter.scala:192)
    at org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:64)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ClassNotFoundException: scala.collection.GenTraversableOnce$class
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)

Is this because Scala 2.11 does not have the mentioned class?

[Final Update]

Adding Scala 2.10 to the spark-submit command did the trick:

spark-submit --class "BinExTest.Main" --jars "BinExTest.jar,lib/scala210.jar" "lib/salt.jar"

1 Answer:

Answer 0 (score: 3):

To run a Spark job, the code needs to replicate itself onto the different nodes that make up the Spark cluster. It does so by copying the jar file over to the other nodes.
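
A minimal sketch of the two hooks the Spark 1.x API offers for this (the paths are illustrative, not taken from the question):

import org.apache.spark.{SparkConf, SparkContext}

// Option 1: register jars when building the context; Spark ships them to each node
val conf = new SparkConf()
  .setAppName("salt-bin-example")
  .setJars(Seq("/path/to/app.jar"))
val sc = new SparkContext(conf)

// Option 2: add a jar after the context exists; it is likewise copied to the workers
sc.addJar("/path/to/extra-dependency.jar")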

This means you need to make sure your class files are packaged in a .jar file. In my typical solutions I build an uber jar that packages the class files and the dependent jar files together in a single .jar file; for that I use the Maven Shade plugin. That does not have to be your solution, but at the very least you should build a .jar file out of your generated classes.
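
For a Scala project like this one, sbt-assembly builds the same kind of uber jar as Maven Shade; a minimal sketch, assuming an sbt layout (names and versions are illustrative):

// project/plugins.sbt -- brings in the `assembly` task
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.3")

// build.sbt
name := "BinExTest"
scalaVersion := "2.10.6"

libraryDependencies ++= Seq(
  // "provided" keeps Spark itself out of the uber jar; spark-submit supplies it at runtime
  "org.apache.spark" %% "spark-core" % "1.6.1" % "provided",
  "org.apache.spark" %% "spark-sql"  % "1.6.1" % "provided"
)

Running sbt assembly then produces one jar holding your classes plus the non-provided dependencies, ready to hand to spark-submit.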

To manually provide additional jar files, you need to add them with the --jars option, which takes a comma-separated list.
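
For example (jar names illustrative):

spark-submit --class BinExTest.Main --jars "dep1.jar,dep2.jar" BinExTest.jar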

Update 1

Actually, even for me there is a lot of confusion around all the available options, specifically regarding the jar files and how they get distributed, or how to modify the classpath in Spark. See another topic I just posted.

Update 2

The second part of your question is answered in another thread.