When I try to build a Maven project with the Scala nature in the Eclipse IDE, I get this error:

object sql is not a member of package org.apache.spark

We tried adding this dependency in pom.xml:
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>1.6.0</version>
</dependency>
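(If the dependency is declared but Eclipse still reports the error, the workspace classpath may simply be stale: running mvn clean compile on the command line confirms that Maven itself resolves spark-sql_2.11, and right-clicking the project and choosing Maven > Update Project makes the Scala IDE pick up the newly resolved jar.)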
Code:
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SaveMode
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext

object MyApp {
  def main(args: Array[String]): Unit = {
    // Read from the Kafka topic "test4" through a receiver-based stream
    // (Zookeeper quorum, consumer group, Map of topic -> receiver thread count)
    val conf = new SparkConf().setMaster("local[*]").setAppName("Spark-Kafka-Integration")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(5))
    val kafkaStream = KafkaUtils.createStream(ssc, "hostname:2181", "spark-streaming-consumer-group", Map("test4" -> 1))

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._

    kafkaStream.foreachRDD(rdd => {
      rdd.foreach(println)
      if (rdd.count() > 0) {
        // The stream yields (key, value) pairs, so keep only the message value;
        // toDF("value") on the raw pair RDD would fail with a column-count mismatch
        // rdd.map(_._2).toDF("value").coalesce(1).write.mode(SaveMode.Append).text("file:///D:/my/")
        // rdd.map(_._2).toDF("value").coalesce(1).write.mode(SaveMode.Append).text("file://user/cloudera/testdata")
        rdd.map(_._2).toDF("value").coalesce(1).write.mode(SaveMode.Append).text("hdfs://hostname:8020/user/cloudera/testdata")
        // rdd.saveAsTextFile("C:/data/spark/")
      }
    })

    // Without start() and awaitTermination() the streaming job never runs
    ssc.start()
    ssc.awaitTermination()
  }
}
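A side note on the write step above: coalesce(1) funnels every 5-second batch through a single task and emits one small file per batch into the target directory, so over time the HDFS directory accumulates many tiny files.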
Complete pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.cyb</groupId>
    <artifactId>First</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>First</name>
    <url>http://maven.apache.org</url>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.10.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
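One thing worth checking in this pom (a hedged observation, since the question does not show how the project is built): it declares no Scala compiler plugin, so a plain mvn compile will not compile .scala sources at all; only the Eclipse Scala nature compiles them inside the IDE. A minimal sketch of the build section commonly added for this, where the plugin version is an assumption:

<build>
    <plugins>
        <plugin>
            <groupId>net.alchim31.maven</groupId>
            <artifactId>scala-maven-plugin</artifactId>
            <version>3.2.2</version>
            <executions>
                <execution>
                    <goals>
                        <goal>compile</goal>
                        <goal>testCompile</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>

Also note that spark-streaming-kafka_2.11 1.6.0 is built against the Kafka 0.8 line, so the explicit kafka-clients 0.10.2.0 dependency may pull in conflicting classes at runtime.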
Expected output:
We want the streaming data from the Kafka topic to be written to HDFS storage. Any help with this would be appreciated.
Answer 0 (score 0):
You need the Spark SQL library on the classpath in order to use the spark-sql classes. Try these imports:
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.SQLImplicits
import org.apache.spark.sql.SQLContext
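If the imports alone do not resolve it, here is a minimal, self-contained sketch of the RDD-to-DataFrame write (assuming Spark 1.6 on the classpath; the object name and the HDFS path are placeholders):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SaveMode}

object SqlWriteSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("sql-write-sketch")
    val sc = new SparkContext(conf)

    // SQLContext is the Spark 1.6 entry point for DataFrames
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._ // brings toDF into scope for RDDs

    val rdd = sc.parallelize(Seq("a", "b", "c"))

    // Same write pattern as in the question; the path below is a placeholder
    rdd.toDF("value").coalesce(1).write.mode(SaveMode.Append).text("hdfs://hostname:8020/user/cloudera/testdata")

    sc.stop()
  }
}

If this standalone program compiles in the same Eclipse project, the spark-sql jar is on the classpath and the original error comes from the project setup around the streaming job rather than from the dependency itself.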