import org.apache.spark.SparkContext
import org.apache.spark.sql._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types.{StructType, StructField, StringType}

val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)
// Read the pipe-delimited source file and drop the header line
val rdd = sc.textFile("/user/scvappdf/hdpcf/SIT/df/meta/1576373/SDM_Data/RDM/rdm_all_ctry_cd_ver1.txt")
val header = rdd.first()
val rdd1 = rdd.filter(x => x != header)

// Split each remaining line on "|" and build a Row of 27 trimmed string columns
val rowRDD = rdd1.map(_.split("\\|")).map(p => Row(
  p(0).trim, p(1).trim, p(2).trim, p(3).trim, p(4).trim, p(5).trim, p(6).trim, p(7).trim, p(8).trim,
  p(9).trim, p(10).trim, p(11).trim, p(12).trim, p(13).trim, p(14).trim, p(15).trim, p(16).trim,
  p(17).trim, p(18).trim, p(19).trim, p(20).trim, p(21).trim, p(22).trim, p(23).trim, p(24).trim,
  p(25).trim, p(26).trim))
val innerStruct = StructType(
  StructField("rowid", StringType, true) ::
  StructField("s_startdt", StringType, false) ::
  StructField("s_starttime", StringType, false) ::
  StructField("s_enddt", StringType, false) ::
  StructField("s_endtime", StringType, false) ::
  StructField("s_deleted_flag", StringType, false) ::
  StructField("ctry_cd", StringType, false) ::
  StructField("ctry_cd_desc", StringType, false) ::
  StructField("intrnl_ctry_rgn_cd", StringType, false) ::
  StructField("intrnl_ctry_rgn_desc", StringType, false) ::
  StructField("iso_ind", StringType, false) ::
  StructField("iso_alp2_ctry_cd", StringType, false) ::
  StructField("iso_alp3_ctry_cd", StringType, false) ::
  StructField("iso_num_cd", StringType, false) ::
  StructField("npc_ind", StringType, false) ::
  StructField("cal_type_cd", StringType, false) ::
  StructField("hemisphere_cd", StringType, false) ::
  StructField("hemisphere_desc", StringType, false) ::
  StructField("ref_tbl_id", StringType, false) ::
  StructField("eff_strt_dt", StringType, false) ::
  StructField("eff_end_dt", StringType, false) ::
  StructField("ent_st", StringType, false) ::
  StructField("vers", StringType, false) ::
  StructField("crtd_tmst", StringType, false) ::
  StructField("mod_tmst", StringType, false) ::
  StructField("src_id", StringType, false) ::
  StructField("ods", StringType, false) :: Nil)
val peopleSchemaRDD = hiveContext.createDataFrame(rowRDD, innerStruct)
peopleSchemaRDD.write.format("orc").mode("overwrite")
.save("/sit/regenv/hdata/DATAFABRIC/reg_tb_prd_dfab/stg_rdm_all_ctry_cd_ver1/ods=2019-12-06")
When saving the DataFrame peopleSchemaRDD in ORC format, I get the error "java.lang.ArrayIndexOutOfBoundsException: 3",
reported as: org.apache.spark.SparkException: Task failed while writing rows.
The path is fine and the output folder gets created, but it contains no data. I am using Spark version 1.6.3.
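To narrow this down, one quick check is to count how many fields each data line actually produces after the split (a diagnostic sketch only, reusing rdd1 from the code above):

// Tally lines by number of "|"-separated fields; any count below 27 would make
// one of the p(i) accesses in the Row mapping throw ArrayIndexOutOfBoundsException.
// Note: String.split without an explicit limit drops trailing empty strings.
rdd1.map(_.split("\\|").length).countByValue().foreach(println)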
Answer 0 (score: 0):
There are some issues with SQLContext; try to use HiveContext. The issue can be fixed with the following configuration:
spark.sql.orc.impl=native
Provide the above in the conf of the spark-submit command.
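On the spark-submit side this would typically be passed as --conf spark.sql.orc.impl=native. A minimal sketch of the programmatic equivalent, assuming you keep the hiveContext from the question (setConf is available on SQLContext/HiveContext; whether spark.sql.orc.impl is actually honored depends on the Spark version in use):

// Set the ORC implementation on the context before writing (sketch; the
// property is simply ignored by Spark versions that do not recognize it).
hiveContext.setConf("spark.sql.orc.impl", "native")

peopleSchemaRDD.write.format("orc").mode("overwrite")
  .save("/sit/regenv/hdata/DATAFABRIC/reg_tb_prd_dfab/stg_rdm_all_ctry_cd_ver1/ods=2019-12-06")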