scala.ScalaReflectionException: class java.sql.Timestamp in JavaMirror with ClasspathFilter

Date: 2019-02-04 11:06:59

Tags: java scala unit-testing apache-spark

I have a unit test that essentially converts a DataFrame to a Dataset using a case class.

The test passes when I run it with JDK 8, but with JDK 10 or 11 it fails with: "scala.ScalaReflectionException: class java.sql.Timestamp in JavaMirror with ClasspathFilter". I get no other information from the failure message. My unit test is as follows:

  "A dataFrameToDataSet function" should "return DataSet[AssetTagAsset] from dataframe that contains Asset data" in {

val assetTagAssets = Map[Long, AssetTagAsset](
  (
    1L,
    new AssetTagAsset(
      asset_tag_asset_id = 1L,
      asset_tag_asset_uuid = "test",
      asset_id = 1L,
      asset_tag_id = 1L,
      sticky = "abc",
      added_date = Option(Timestamp.valueOf("2018-04-17 10:10:50")),
      added_user_id = Option(1L),
      region = "sjc01",
      environment = "eng",
      pod = "p05"
    )
  )
)

val DecimalType = DataTypes.createDecimalType(38, 10)

val assetTagAssetSchema = List(
  StructField("ASSET_TAG_ASSET_ID", DecimalType, false),
  StructField("ASSET_TAG_ASSET_UUID", StringType, false),
  StructField("ASSET_ID", DecimalType, false),
  StructField("ASSET_TAG_ID", DecimalType, false),
  StructField("STICKY", StringType, false),
  StructField("ADDED_DATE", TimestampType, true),
  StructField("ADDED_USER_ID", DecimalType, true),
  StructField("REGION", StringType, false),
  StructField("ENVIRONMENT", StringType, false),
  StructField("POD", StringType, false)
)

val assetTagAssetRows = assetTagAssets.values
  .map(
    assetTagAsset =>
      Row(
        java.math.BigDecimal.valueOf(assetTagAsset.asset_tag_asset_id),
        assetTagAsset.asset_tag_asset_uuid,
        java.math.BigDecimal.valueOf(assetTagAsset.asset_id),
        java.math.BigDecimal.valueOf(assetTagAsset.asset_tag_id),
        assetTagAsset.sticky,
        assetTagAsset.added_date.orNull,
        toBigDecimal(assetTagAsset.added_user_id),
        assetTagAsset.region,
        assetTagAsset.environment,
        assetTagAsset.pod
    )
  )
  .toSeq

val assetTagAssetDF = spark.createDataFrame(
  spark.sparkContext.parallelize(assetTagAssetRows),
  StructType(assetTagAssetSchema)
)

val ds = dataFrameToDataSet[AssetTagAsset]("oracle_grc_asset_tag_asset", assetTagAssetDF)
assert(ds.count() === assetTagAssets.size)

ds.collect.foreach(assetTagAsset => {
  val expectedAssetTagAsset = assetTagAssets(assetTagAsset.asset_tag_asset_id)
  assert(assetTagAsset.asset_tag_asset_uuid === expectedAssetTagAsset.asset_tag_asset_uuid)
  assert(assetTagAsset.asset_id === expectedAssetTagAsset.asset_id)
  assert(assetTagAsset.asset_tag_id === expectedAssetTagAsset.asset_tag_id)
  assert(assetTagAsset.sticky === expectedAssetTagAsset.sticky)
  assert(assetTagAsset.added_date === expectedAssetTagAsset.added_date)
  assert(assetTagAsset.added_user_id === expectedAssetTagAsset.added_user_id)
  assert(assetTagAsset.region === expectedAssetTagAsset.region)
  assert(assetTagAsset.environment === expectedAssetTagAsset.environment)
  assert(assetTagAsset.pod === expectedAssetTagAsset.pod)
})

}
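For reference, dataFrameToDataSet itself is not shown here. A minimal sketch of what such a helper typically looks like, assuming it simply derives an Encoder and delegates to Dataset.as (the signature matches the call above; the body and the parameter name sourceName are my assumption, not the actual implementation):

  import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders}
  import scala.reflect.runtime.universe.TypeTag

  // Hypothetical sketch: Encoders.product derives Encoder[T] via Scala runtime
  // reflection, which is the step that resolves java.sql.Timestamp through the
  // JavaMirror named in the exception. The first parameter is unused in this
  // sketch; in the real helper it presumably identifies the source table.
  def dataFrameToDataSet[T <: Product : TypeTag](sourceName: String, df: DataFrame): Dataset[T] = {
    implicit val encoder: Encoder[T] = Encoders.product[T]
    df.as[T]
  }

If the real helper does something similar, the reflection lookup of java.sql.Timestamp happens at this point, before any data is touched.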

AssetTagAsset is a case class defined as follows:

case class AssetTagAsset(
  asset_tag_asset_id: Long,
  asset_tag_asset_uuid: String,
  asset_id: Long,
  asset_tag_id: Long,
  sticky: String,
  added_date: Option[java.sql.Timestamp],
  added_user_id: Option[Long],
  region: String,
  environment: String,
  pod: String
)

object AssetTagAsset {
  def apply(row: Row): AssetTagAsset = {
    new AssetTagAsset(
      asset_tag_asset_id = row.getAs[java.math.BigDecimal]("ASSET_TAG_ASSET_ID").longValue,
      asset_tag_asset_uuid = row.getAs[String]("ASSET_TAG_ASSET_UUID"),
      asset_id = row.getAs[java.math.BigDecimal]("ASSET_ID").longValue,
      asset_tag_id = row.getAs[java.math.BigDecimal]("ASSET_TAG_ID").longValue,
      sticky = row.getAs[String]("STICKY"),
      added_date = getOptionValue(row, "ADDED_DATE"),
      added_user_id = getOptionValue(row, "ADDED_USER_ID"),
      region = row.getAs[String]("REGION"),
      environment = row.getAs[String]("ENVIRONMENT"),
      pod = row.getAs[String]("POD")
    )
  }
}
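The field the exception complains about is added_date: Option[java.sql.Timestamp]. As far as I can tell, deriving an encoder for any case class with a java.sql.Timestamp field walks the same reflection path, so a stripped-down reproduction (TsOnly is a made-up class for illustration; run it under the same sbt/JDK setup as the failing test) would be:

  import org.apache.spark.sql.Encoders

  // Made-up one-field case class: deriving its encoder forces Scala runtime
  // reflection to resolve java.sql.Timestamp through the mirror's classloader
  // (the ClasspathFilter named in the exception).
  case class TsOnly(added_date: Option[java.sql.Timestamp])

  Encoders.product[TsOnly]

The ClasspathFilter in the message appears to be sbt's filtering classloader, which suggests the lookup is failing inside sbt's in-process test classloader rather than in Spark itself.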

0 Answers:

There are no answers yet.