I have a deeply nested dataframe that I am trying to flatten. The original schema looks like this:
|-- _History: struct (nullable = true)
| |-- Article: array (nullable = true)
| | |-- element: struct (containsNull = true)
| | | |-- Id: string (nullable = true)
| | | |-- Timestamp: long (nullable = true)
| |-- Channel: struct (nullable = true)
| | |-- Music: array (nullable = true)
| | | |-- element: long (containsNull = true)
| | |-- Sports: array (nullable = true)
| | | |-- element: long (containsNull = true)
| | |-- Style: array (nullable = true)
| | | |-- element: long (containsNull = true)
I was able to flatten most of the fields with a recursive function:
import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DataType, StructType}

implicit class DataFrameFlattener(df: DataFrame) {
  def flattenSchema: DataFrame = {
    df.select(flatten(Nil, df.schema): _*)
  }

  protected def flatten(path: Seq[String], schema: DataType): Seq[Column] = schema match {
    case s: StructType => s.fields.flatMap(f => flatten(path :+ f.name, f.dataType))
    case other => col(path.map(n => s"`$n`").mkString(".")).as(path.mkString(".")) :: Nil
  }
}
However, this does not seem to flatten _History.Article.Id and _History.Article.Timestamp in the schema above. Why is that, and how can I flatten those two fields into their own columns in the dataframe?
Answer 0 (score: 0)
I found a workaround: create two new columns for the flattened fields:
val flatDF = df
  .withColumn("_History.Article.Id", df("`_History.Article`.Id"))
  .withColumn("_History.Article.Timestamp", df("`_History.Article`.Timestamp"))
Answer 1 (score: 0)
With Scala Spark, you can flatten the JSON recursively:
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.sql.functions.{col, explode}
import org.apache.spark.sql.types.{ArrayType, StructType}

def recurs(df: DataFrame): DataFrame = {
  // Done once no struct or array-of-struct columns remain.
  if (df.schema.fields.find(_.dataType match {
    case ArrayType(StructType(_), _) | StructType(_) => true
    case _ => false
  }).isEmpty) df
  else {
    val columns = df.schema.fields.map(f => f.dataType match {
      case _: ArrayType => explode(col(f.name)).as(f.name) // arrays become rows
      case s: StructType => col(s"${f.name}.*")            // structs become columns
      case _ => col(f.name)
    })
    recurs(df.select(columns: _*))
  }
}
val df = spark.read.json(json_location)
val flatten_df = recurs(df)
flatten_df.show()
This explodes arrays vertically, turning each array element into its own row.
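As an illustrative run (the sample values are invented; reading JSON from an in-memory Dataset[String] assumes Spark 2.2+):

import spark.implicits._

// Toy input shaped like the question's _History.Article (values made up).
val sample = spark.read.json(Seq(
  """{"_History":{"Article":[{"Id":"a1","Timestamp":1},{"Id":"a2","Timestamp":2}]}}"""
).toDS)

recurs(sample).show()
// Expected shape: one row per Article element, with columns Id and Timestamp.

One caveat: Spark allows only one generator (explode) per select clause, so this version can fail when a pass hits two sibling arrays at once, e.g. Channel.Music and Channel.Sports after the Channel struct is expanded. The second version below explodes a single array per pass, which sidesteps this.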
If you don't want array elements appended as additional rows, there is another version:
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{ArrayType, StructType}

def flattenDataframe(df: DataFrame): DataFrame = {
  // All top-level fields of the current schema.
  val fields = df.schema.fields
  val fieldNames = fields.map(_.name)

  for (field <- fields) {
    field.dataType match {
      case _: ArrayType =>
        // Explode one array at a time, then recurse on the result.
        val fieldName = field.name
        val fieldNamesExcludingArray = fieldNames.filter(_ != fieldName)
        val fieldNamesAndExplode =
          fieldNamesExcludingArray ++ Array(s"explode_outer($fieldName) as $fieldName")
        val explodedDf = df.selectExpr(fieldNamesAndExplode: _*)
        return flattenDataframe(explodedDf)
      case structType: StructType =>
        // Replace the struct column with its children, sanitizing the new
        // names (dots to underscores; strip $, spaces, and dashes).
        val fieldName = field.name
        val childFieldNames = structType.fieldNames.map(child => s"$fieldName.$child")
        val newFieldNames = fieldNames.filter(_ != fieldName) ++ childFieldNames
        val renamedCols = newFieldNames.map(x =>
          col(x).as(x.replace(".", "_").replace("$", "_").replace("__", "_")
            .replace(" ", "").replace("-", "")))
        val flattenedDf = df.select(renamedCols: _*)
        return flattenDataframe(flattenedDf)
      case _ => // plain column, nothing to do
    }
  }
  df
}
Call it just like the previous one, and add any imports if I missed some.
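For completeness, a hypothetical invocation mirroring the first version (json_location is the placeholder from above; the resulting names are inferred from the renaming logic, not from a real run):

val df = spark.read.json(json_location)
val flatDf = flattenDataframe(df)
flatDf.show()
// Nested paths come out with dots replaced by underscores, e.g.
// _History_Article_Id and _History_Article_Timestamp. Each array
// (Article, Music, Sports, Style) is exploded in its own pass, so the
// final rows are the cross product of the exploded arrays.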