Spark 2.0 - 将DataFrame转换为DataSet

时间:2016-11-20 02:56:25

标签: scala apache-spark dataframe dataset

我想加载我的数据并对其进行一些基本的线性回归。首先,我需要使用VectorAssembler来生成我的features列。但是,当我使用assembler.transform(df)时,df是DataFrame,而它需要Dataset。我尝试了df.toDS,但它提示value toDS is not a member of org.apache.spark.sql.DataFrame。实际上,toDS是org.apache.spark.sql.DatasetHolder的成员。

我在这里弄错了什么?

package main.scala

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions._
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.DatasetHolder
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg.Vectors

object Analyzer {
  // Question code: Spark 1.6-style setup (SparkConf + SparkContext + SQLContext).
  // This is the context in which the asker hits the DataFrame-vs-Dataset issue.
  def main(args: Array[String]) {

    // Spark 1.6-style entry points; superseded by SparkSession in Spark 2.x.
    val conf = new SparkConf()
    val sc   = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)    
    // Brings in implicit conversions (e.g. toDF/toDS on RDDs and Seqs).
    import sqlContext.implicits._

    // Load a headerless, tab-separated file via the spark-csv package,
    // inferring column types from the data.
    val df = sqlContext.read
    .format("com.databricks.spark.csv")
    .option("header", "false")
    .option("delimiter", "\t")
    .option("parserLib", "UNIVOCITY")  
    .option("inferSchema", "true")
    .load("data/snap/*")

    // Combine the five numeric columns into a single vector column "features".
    val assembler = new VectorAssembler()
    .setInputCols(Array("own", "want", "wish", "trade", "comment"))
    .setOutputCol("features")

    // This call is where the asker's DataFrame/Dataset mismatch surfaces.
    val df1 = assembler.transform(df)

    // Alternative feature/label specification via an R-style formula.
    // NOTE(review): result is unused here; the snippet stops before fitting a model.
    val formula = new RFormula().setFormula("rank ~ own + want + wish + trade + comment")
    .setFeaturesCol("features")
        .setLabelCol("rank")
}
}

1 个答案:

答案 0(得分:2)

显然,问题在于我仍然在使用Spark 1.6样式的SQLContext。改为使用SparkSession之后,transform()就能够隐式接受DataFrame了。

package main.scala

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Dataset
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg.Vectors

object Analyzer {
  /** Entry point: loads the tab-separated snapshot data and assembles a
    * single "features" vector column, using the Spark 2.x SparkSession API.
    */
  def main(args: Array[String]) {
    // SparkSession is the unified Spark 2.x entry point; its implicits
    // provide the DataFrame/Dataset conversions the old SQLContext lacked here.
    val spark = SparkSession.builder().getOrCreate()
    import spark.implicits._

    // Configure the CSV reader once, then load: headerless, tab-separated,
    // with schema inference enabled.
    val reader = spark.read
      .format("com.databricks.spark.csv")
      .option("header", "false")
      .option("delimiter", "\t")
      .option("parserLib", "UNIVOCITY")
      .option("inferSchema", "true")
    val df = reader.load("data/snap/*")

    df.show()

    // Collapse the five count columns into one vector column named "features".
    val inputColumns = Array("own", "want", "wish", "trade", "comment")
    val assembler = new VectorAssembler()
      .setInputCols(inputColumns)
      .setOutputCol("features")

    // transform() now accepts the DataFrame directly (DataFrame = Dataset[Row]).
    val df1 = assembler.transform(df)
  }
}