Performing a UNION ALL with Spark SQL

Date: 2019-08-10 20:47:14

Tags: apache-spark hive

I am trying to perform a UNION ALL by combining the following predefined SQL statements.

`new_records="""select 
sor.EMP_ID,
sor.EMP_NAME,
sor.EMP_STATE,
sor.EMP_PH,
'I' as CDC_IND,
TO_DATE(from_unixtime(unix_timestamp())) as EFCT_DT,
cast('9999-12-31'  as date) as  EXPR_DT
from scd.EMP_SOR sor
left join scd.EMP_HIST_ACTIVE active_hist
on sor.EMP_ID = active_hist.EMP_ID
where active_hist.EMP_ID is NULL"""`

`unchanged_records="""select 
sor.EMP_ID,
sor.EMP_NAME,
sor.EMP_STATE,
sor.EMP_PH,
'N' as CDC_IND,
emp_hist.efct_dt,
emp_hist.expr_dt
from scd.EMP_SOR sor  
inner join scd.EMP_HIST_ACTIVE emp_hist
on sor.EMP_ID = emp_hist.EMP_ID
where sor.EMP_ID = emp_hist.EMP_ID
and sor.EMP_NAME = emp_hist.EMP_NAME
and sor.EMP_STATE = emp_hist.EMP_STATE
and sor.EMP_PH = emp_hist.EMP_PH"""`

`changed_records="""select
sor.EMP_ID,
sor.EMP_NAME,
sor.EMP_STATE,
sor.EMP_PH,
'U' as CDC_IND,
TO_DATE(from_unixtime(unix_timestamp())) as EFCT_DT,
cast('9999-12-31'  as date) as EXPR_DT 
from scd.EMP_SOR sor inner join scd.EMP_HIST_ACTIVE emp_hist
on sor.EMP_ID = emp_hist.EMP_ID
where sor.EMP_ID <> emp_hist.EMP_ID
or sor.EMP_NAME <> emp_hist.EMP_NAME
or sor.EMP_STATE <> emp_hist.EMP_STATE
or sor.EMP_PH <> emp_hist.EMP_PH"""`

`sqlContext.sql("new_records union all unchanged_records
 union all   changed_records")`

我将所有联合的上述sql称为

It should return the combined result of the three queries, but for some reason the query fails with the following error:

Error: cannot recognize input near 'new_records' 'union' 'all'; line 1 pos 0

I am not sure what I am missing here. Any help would be appreciated.

1 Answer:

Answer 0 (score: 1)

It's a simple syntax error: the string passed to `sql()` contains the Scala variable *names*, so Spark's parser sees the literal word `new_records` rather than the SELECT statement the variable holds.
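A minimal sketch of the fix, reusing the `sqlContext` from the question — interpolate the variables so their SQL text, not their names, is spliced into one statement:

// Fails: the parser receives the literal words "new_records union all ..."
sqlContext.sql("new_records union all unchanged_records")

// Works: Scala s-interpolation substitutes the SQL held by each variable
sqlContext.sql(s"$new_records union all $unchanged_records")

The full, corrected version: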


import org.apache.spark.sql.SparkSession

val  new_records="""select
    sor.EMP_ID,
    sor.EMP_NAME,
    sor.EMP_STATE,
    sor.EMP_PH,
    'I' as CDC_IND,
    TO_DATE(from_unixtime(unix_timestamp())) as EFCT_DT,
    cast('9999-12-31'  as date) as  EXPR_DT
    from scd.EMP_SOR sor
    left join scd.EMP_HIST_ACTIVE active_hist
    on sor.EMP_ID = active_hist.EMP_ID
    where active_hist.EMP_ID is NULL"""

val unchanged_records="""select
    sor.EMP_ID,
    sor.EMP_NAME,
    sor.EMP_STATE,
    sor.EMP_PH,
    'N' as CDC_IND,
    emp_hist.efct_dt,
    emp_hist.expr_dt
    from scd.EMP_SOR sor
    inner join scd.EMP_HIST_ACTIVE emp_hist
    on sor.EMP_ID = emp_hist.EMP_ID
    where sor.EMP_ID = emp_hist.EMP_ID
    and sor.EMP_NAME = emp_hist.EMP_NAME
    and sor.EMP_STATE = emp_hist.EMP_STATE
    and sor.EMP_PH = emp_hist.EMP_PH"""


val changed_records="""select
    sor.EMP_ID,
    sor.EMP_NAME,
    sor.EMP_STATE,
    sor.EMP_PH,
    'U' as CDC_IND,
    TO_DATE(from_unixtime(unix_timestamp())) as EFCT_DT,
    cast('9999-12-31'  as date) as EXPR_DT
    from scd.EMP_SOR sor inner join scd.EMP_HIST_ACTIVE emp_hist
    on sor.EMP_ID = emp_hist.EMP_ID
    where sor.EMP_ID <> emp_hist.EMP_ID
    or sor.EMP_NAME <> emp_hist.EMP_NAME
    or sor.EMP_STATE <> emp_hist.EMP_STATE
    or sor.EMP_PH <> emp_hist.EMP_PH"""

// build a local SparkSession (use your cluster's master in production)
val spark: SparkSession = SparkSession.builder
  .config("spark.master", "local") //.config("spark.eventLog.enabled", "true")
  .appName("uniontest")
  .getOrCreate()

// interpolate the three SELECTs into a single UNION ALL statement
spark.sql(s"$new_records" +
  " union all " +
  s"$unchanged_records" +
  " union all " +
  s"$changed_records")

will work. Also take a look at Why would I want .union over .unionAll in Spark for SchemaRDDs?
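As an aside, you could also run each query separately and combine the resulting DataFrames through the DataFrame API. In Spark 2.x, `Dataset.union` keeps duplicates, so it behaves like SQL's UNION ALL (`unionAll` is deprecated). A minimal sketch, reusing the `spark` session and query strings defined above:

val newDf       = spark.sql(new_records)
val unchangedDf = spark.sql(unchanged_records)
val changedDf   = spark.sql(changed_records)

// union matches columns by position, so all three queries must project
// the same columns in the same order (EFCT_DT before EXPR_DT here)
val allRecords = newDf.union(unchangedDf).union(changedDf)
allRecords.show()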