InternalCompilerException: compiled class was loaded through a different loader

Asked: 2018-10-24 09:52:17

Tags: scala

I am using the Scala interpreter (IMain) to evaluate Scala statements that come from configuration.

Sample code:

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.IMain

object BSFTest {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("TEST")
      .setMaster("local") // spark://127.0.0.1:7077

    val sparkSession = SparkSession.builder()
      .appName("TEST")
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    import sparkSession.sql

    sql ("DROP DATABASE IF EXISTS test CASCADE")
    sql(s"CREATE DATABASE test")

    sql ("CREATE TABLE test.box_width (id INT, width INT)")
    sql ("INSERT INTO test.box_width VALUES (1,1), (2,2)")

    sql ("CREATE TABLE test.box_length (id INT, length INT)")
    sql ("INSERT INTO test.box_length VALUES (1,10), (2,20)")

    val widthDF:DataFrame = sql("select *  from  test.box_width")
    val lengthDF = sql("select *  from  test.box_length")

    val settings = new Settings
    settings.usejavacp.value = true
    settings.deprecation.value = true
    settings.embeddedDefaults(this.getClass().getClassLoader())

    val eval = new IMain(settings)
    eval.bind("lengthDF", "org.apache.spark.sql.DataFrame", lengthDF)
    eval.bind("widthDF", "org.apache.spark.sql.DataFrame", widthDF)
    val clazz1 = "lengthDF.join(widthDF, \"id\")" // statement supplied from configuration
    val evaluated = eval.interpret(clazz1)
    val res = eval.valueOfTerm("res0").get.asInstanceOf[DataFrame]
    println("PRINT SCHEMA: " + res.schema) // this statement runs fine
    res.show() // EXCEPTION HERE
  }
}

When I execute the code, I get the following error:

lengthDF: org.apache.spark.sql.DataFrame = [id: int, length: int]
widthDF: org.apache.spark.sql.DataFrame = [id: int, width: int]
res0: org.apache.spark.sql.DataFrame = [id: int, length: int ... 1 more field]
PRINT SCHEMA: StructType(StructField(id,IntegerType,true), StructField(length,IntegerType,true), StructField(width,IntegerType,true))
18/10/24 15:08:14 ERROR CodeGenerator: failed to compile: org.codehaus.janino.InternalCompilerException: Compiling "GeneratedClass": Class 'org.apache.spark.sql.catalyst.expressions.codegen.GeneratedClass' was loaded through a different loader
/* 001 */ public java.lang.Object generate(Object[] references) {
/* 002 */   return new SpecificSafeProjection(references);
/* 003 */ }
/* 004 */
/* 005 */ class SpecificSafeProjection extends org.apache.spark.sql.catalyst.expressions.codegen.BaseProjection {
/*....
Caused by: org.codehaus.janino.InternalCompilerException: Class 'org.apache.spark.sql.catalyst.expressions.codegen.GeneratedClass' was loaded through a different loader
    at org.codehaus.janino.SimpleCompiler$2.getDelegate(SimpleCompiler.java:410)
    at org.codehaus.janino.SimpleCompiler$2.accept(SimpleCompiler.java:353)
    at org.codehaus.janino.UnitCompiler.getType(UnitCompiler.java:6130)

FULL LOG

What I cannot understand is why res.show() (which fetches the data from the DataFrame and prints it) throws the exception, while res.schema (which only reads the DataFrame's schema) works as expected. The schema is available from the logical plan without running the job; show() actually executes the plan, which triggers Spark's whole-stage code generation, and it is the Janino compilation of that generated class that hits the class loader mismatch.
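To see which loaders are involved, a quick diagnostic (a sketch of mine, not part of the original program; it assumes placement just before res.show()) is to print the loader on each side:

// Diagnostic sketch: compare the application, interpreter, and context loaders.
// If the loader that Spark's code generator delegates to is not the same one
// that loaded the bound DataFrame classes, Janino's check fails as above.
println("application loader: " + this.getClass.getClassLoader)
println("interpreter loader: " + eval.classLoader)
println("context loader: " + Thread.currentThread().getContextClassLoader)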

Versions:

scalaVersion := "2.11.11"
libraryDependencies += "org.apache.spark" %% "spark-core" % "2.2.2"
libraryDependencies += "org.apache.spark" %% "spark-sql" % "2.2.2"
libraryDependencies += "org.apache.spark" %% "spark-hive" % "2.2.2"

How can I solve this?

1 Answer:

Answer 0 (score: 0)

I have solved the problem, with reference to https://stackoverflow.com/a/6164608/811602.

Now I create and load the class at runtime. Here is the working code:

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.IMain

import java.util.concurrent.atomic.AtomicInteger

object DynamicClassLoader {

  val offset = new AtomicInteger()

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("TEST")
      .setMaster("local") // spark://127.0.0.1:7077

    val sparkSession = SparkSession.builder()
      .appName("TEST")
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    import sparkSession.sql

    sql ("DROP DATABASE IF EXISTS test CASCADE")
    sql(s"CREATE DATABASE test")

    sql ("CREATE TABLE test.box_width (id INT, width INT)")
    sql ("INSERT INTO test.box_width VALUES (1,1), (2,2)")

    sql ("CREATE TABLE test.box_length (id INT, length INT)")
    sql ("INSERT INTO test.box_length VALUES (1,10), (2,20)")

    val widthDF = sql("select *  from  test.box_width")
    val lengthDF = sql("select *  from  test.box_length")

    val udfClassName = "AClass" + offset.getAndIncrement()
    val statements =
      """
        |val result = input1.join(input2, "id")
        |result
        |""".stripMargin
    // Build the source of a class implementing SomeTrait, with the
    // configured statements as the body of someMethod.
    val srcA =
      s"""
         |class $udfClassName extends SomeTrait {
         |  import org.apache.spark.sql.DataFrame
         |  def someMethod(input1: DataFrame, input2: DataFrame): DataFrame = {
         |    $statements
         |  }
         |}
         |""".stripMargin

    val settings = new Settings
    settings.usejavacp.value = true
    settings.deprecation.value = true
    settings.embeddedDefaults(this.getClass().getClassLoader())

    // Compile the generated source with the interpreter and load the class
    // through the interpreter's own class loader.
    val eval = new IMain(settings)
    eval.compileString(srcA)
    val classA = eval.classLoader.loadClass(udfClassName)
    eval.close()

    // Instantiate the compiled class and run the join through it.
    val objA = classA.newInstance().asInstanceOf[SomeTrait]
    val resultDF = objA.someMethod(lengthDF, widthDF)
    println(resultDF.schema)
    resultDF.show()
  }
}

trait SomeTrait {
  def someMethod(input1: DataFrame, input2: DataFrame): DataFrame
}

Although the posted issue no longer blocks me and I found this alternative way to achieve the same goal, the question stays open: the root cause of the exception is still to be found and fixed.
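For anyone who wants to chase the root cause rather than work around it: one direction that may be worth trying (an untested assumption of mine, not a confirmed fix) is to make the interpreter's class loader the thread context class loader before the first action runs, since Janino resolves the generated class through a delegate loader:

// Untested sketch: align the context class loader with the interpreter's
// loader around the action that triggers code generation, then restore it.
val previous = Thread.currentThread().getContextClassLoader
Thread.currentThread().setContextClassLoader(eval.classLoader)
try {
  res.show() // the action that failed during codegen in the original code
} finally {
  Thread.currentThread().setContextClassLoader(previous)
}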