//package com.examples
/**
* Created by kalit_000 on 27/09/2015.
*/
import java.sql.{Connection, DriverManager}
import java.util.Properties

import kafka.producer.{KeyedMessage, Producer, ProducerConfig}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
object SqlServerKafkaProducer {

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)
    Logger.getLogger("akka").setLevel(Level.WARN)

    val conf = new SparkConf().setMaster("local[2]").setAppName("MSSQL_KAFKA_PRODUCER")
    val sc = new SparkContext(conf) // created but never used below; the query runs over plain JDBC

    val driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    val url = "jdbc:sqlserver://localhost;user=admin;password=oracle;database=AdventureWorks2014"
    val username = "admin"
    val password = "oracle"

    Class.forName(driver)

    /* Create a connection and statement against SQL Server and run the query */
    val connection: Connection = DriverManager.getConnection(url, username, password)
    val statement = connection.createStatement()
    val resultSet = statement.executeQuery(
      "select top 10 CustomerID,StoreID,TerritoryID,AccountNumber from AdventureWorks2014.dbo.Customer")
    resultSet.setFetchSize(10)

    val metaData = resultSet.getMetaData
    val columnCount = metaData.getColumnCount

    /* Print the column names */
    for (i <- 1 to columnCount) {
      println("Column Names are:- %s".format(metaData.getColumnName(i)))
    }

    /* Build the Kafka producer properties once, outside the row loop,
       so a single producer is reused for every message */
    val props = new Properties()
    props.put("metadata.broker.list", "localhost:9092")
    props.put("serializer.class", "kafka.serializer.StringEncoder")
    val config = new ProducerConfig(props)
    val producer = new Producer[String, String](config)

    /* Send each row to the "trade" topic, with columns delimited by "~" */
    while (resultSet.next()) {
      val row = (1 to columnCount).map(i => resultSet.getObject(i).toString).mkString("~")
      println(row)
      producer.send(new KeyedMessage[String, String]("trade", row))
    }

    /* Close the Kafka producer and the SQL Server database connection */
    producer.close()
    connection.close()
  }
}
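
To verify that rows actually reach the trade topic, a minimal consumer sketch against the same old kafka.consumer (0.8-style) API as the producer above could look like this; the zookeeper.connect, group.id and auto.offset.reset values are placeholders for a local setup:

import java.util.Properties
import kafka.consumer.{Consumer, ConsumerConfig}

object TradeTopicChecker {
  def main(args: Array[String]): Unit = {
    /* Placeholder settings: local ZooKeeper and a throwaway consumer group */
    val props = new Properties()
    props.put("zookeeper.connect", "localhost:2181")
    props.put("group.id", "trade-checker")
    props.put("auto.offset.reset", "smallest")

    val connector = Consumer.create(new ConsumerConfig(props))
    /* One stream on the "trade" topic; message payloads arrive as byte arrays */
    val streams = connector.createMessageStreams(Map("trade" -> 1))
    for (msg <- streams("trade").head) {
      println(new String(msg.message(), "UTF-8"))
    }
  }
}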
This is a Scala Spark project and I build the jar with Maven in IntelliJ IDEA. The jar file is created in the folder (C:\Users\kalit_000\IdeaProjects\SparkCookBook\target\SparkCookBook-0.0.1-SNAPSHOT-jar-with-dependencies.jar). When I try to run the jar file with the command

scala -classpath "C:\Users\kalit_000\IdeaProjects\SparkCookBook\target\SparkCookBook-1.0-SNAPSHOT-jar-with-dependencies.jar" SqlServerKafkaProducer

I get the error:

No such file or class on classpath: SqlServerKafkaProducer.class

I can see my class inside the jar file (I opened the jar with Java decompiler software), and the project compiles successfully in IntelliJ IDEA. Can anyone help?
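
Should I be launching it with spark-submit instead of the plain scala runner? Something like the sketch below, using the same jar path as above (--master local[2] just mirrors the master hard-coded in the program):

spark-submit --class SqlServerKafkaProducer --master local[2] "C:\Users\kalit_000\IdeaProjects\SparkCookBook\target\SparkCookBook-1.0-SNAPSHOT-jar-with-dependencies.jar"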