Convert a Scala Map to a DataFrame

Posted: 2021-01-03 14:10:33

Tags: scala apache-spark

I am trying to stream Twitter data with Apache Spark, and I want to save it to HDFS as a CSV file. I know I have to convert it to a DataFrame, but I haven't been able to do so.

Here is my full code:

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.twitter.TwitterUtils
//import com.google.gson.Gson
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
//import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
//import org.apache.spark.sql.functions._
import sentimentAnalysis.sentimentScore

case class twitterCaseClass (userID: String = "", user: String = "", createdAt: String = "", text: String = "", sentimentType: String = "")

object twitterStream {

  //private val gson = new Gson()

  def main(args: Array[String]) {

    //Twitter API
    Logger.getLogger("org").setLevel(Level.ERROR)
    System.setProperty("twitter4j.oauth.consumerKey", "#######")
    System.setProperty("twitter4j.oauth.consumerSecret", "#######")
    System.setProperty("twitter4j.oauth.accessToken", "#######")
    System.setProperty("twitter4j.oauth.accessTokenSecret", "#######")

    val spark = SparkSession.builder().appName("twitterStream").master("local[*]").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    val streamContext = new StreamingContext(sc, Seconds(5))
    import spark.implicits._

    val filters = Array("Singapore")
    val filtered = TwitterUtils.createStream(streamContext, None, filters)
    val englishTweets = filtered.filter(_.getLang() == "en")

    englishTweets.print()

    val tweets = englishTweets.map { col =>
      (
        "userID" -> col.getId,
        "user" -> col.getUser.getScreenName,
        "createdAt" -> col.getCreatedAt.toInstant.toString,
        "text" -> col.getText.toLowerCase.split(" ").filter(_.matches("^[a-zA-Z0-9 ]+$")).fold("")((a, b) => a + " " + b).trim,
        "sentimentType" -> sentimentScore(col.getText).toString
      )
    }
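    // Note: each "key" -> value expression above builds a (String, Any) pair,
    // so this map yields a Tuple5 of pairs rather than DataFrame rows. That is
    // why the saved output looks like ((userID,...),(user,...),...).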



    //val tweets = englishTweets.map(gson.toJson(_))

    //tweets.saveAsTextFiles("hdfs://localhost:9000/usr/sparkApp/test/")


    streamContext.start()
    streamContext.awaitTermination()

  }
}

I'm not sure where I might be going wrong. Another approach would be to use the case class. Are there any good examples I could follow?
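One pattern that tends to fit here, sketched below under stated assumptions: map each status into the twitterCaseClass defined above, then convert each micro-batch RDD to a DataFrame inside foreachRDD and append it to HDFS as CSV. The HDFS path is reused from the commented-out saveAsTextFiles call, and sentimentScore is assumed to behave as imported; this is a sketch, not tested against a live stream.

// Map each status into the case class instead of a tuple of pairs.
val tweetRows = englishTweets.map { status =>
  twitterCaseClass(
    userID = status.getId.toString,
    user = status.getUser.getScreenName,
    createdAt = status.getCreatedAt.toInstant.toString,
    text = status.getText.toLowerCase,
    sentimentType = sentimentScore(status.getText).toString
  )
}

// Each micro-batch RDD of case class instances converts to a DataFrame
// via spark.implicits._, then gets appended to HDFS as CSV.
tweetRows.foreachRDD { rdd =>
  if (!rdd.isEmpty()) {
    rdd.toDF()
      .write
      .mode("append")
      .option("header", "true")
      .csv("hdfs://localhost:9000/usr/sparkApp/test/")
  }
}

Note that the foreachRDD block has to be registered before streamContext.start() is called, i.e. roughly where the commented-out saveAsTextFiles line sits in the code above.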

Update: the result of the map function, saved to HDFS, looks like this: ((userID,1345940003533312000),(user,rei_yang),(createdAt,2021-01-04T03:47:57Z),(text,just posted a photo singapore),(sentimentType,NEUTRAL))

Is there a way to encode this into a DataFrame?
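That shape falls out of the map function itself: each "userID" -> col.getId expression builds a (key, value) pair, so the DStream carries a Tuple5 of pairs instead of rows. A minimal sketch of flattening it, assuming tweets is the DStream produced by the map above and spark.implicits._ is in scope as in main():

// Flatten each Tuple5 of pairs into a row of plain values, then name the
// columns with toDF.
tweets.foreachRDD { rdd =>
  if (!rdd.isEmpty()) {
    val df = rdd
      .map { case (id, user, created, text, sentiment) =>
        (id._2.toString, user._2, created._2, text._2, sentiment._2)
      }
      .toDF("userID", "user", "createdAt", "text", "sentimentType")
    df.show(5, truncate = false)
  }
}

Mapping straight to the case class, as in the earlier sketch, avoids this flattening step entirely.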

0 Answers:

There are no answers yet.