我正在研究使用 Spark（Scala）对推文进行情绪分析的问题。我有一个使用逻辑回归模型的可正常工作的版本，如下：
-- Deduplicate dbo.tblEmployee: number each row within a (Firstname, empID)
-- group, then delete every row after the first. T-SQL allows DELETE to
-- target a CTE directly; the delete is applied to the underlying table.
WITH a as (
SELECT Firstname,ROW_NUMBER() OVER(PARTITION by Firstname, empID ORDER BY Firstname)
AS duplicateRecCount
FROM dbo.tblEmployee
)
-- Keep row 1 of each group, delete the duplicates (row numbers 2..n).
DELETE
FROM a
WHERE duplicateRecCount > 1
使用逻辑回归时，返回的 AUC 完全正常。但是，当我把逻辑回归替换为 `val nb = new NaiveBayes()` 时，却收到了一个错误。下面是我的完整代码：
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.classification.LogisticRegressionWithSGD
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.{StructType, StructField, StringType, IntegerType};
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.ml.feature.{CountVectorizer, RegexTokenizer, StopWordsRemover}
import org.apache.spark.sql.functions._
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.mllib.evaluation.RegressionMetrics
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
// Train a logistic-regression sentiment classifier on the Sentiment140
// corpus (tokenize -> remove stop words -> bag-of-words -> LR) and report
// area-under-ROC on the manually-labelled test set.
val sqlContext = new SQLContext(sc)
// Required for RDD.toDF: the spark-shell auto-imports this, but a compiled
// application will not build without it.
import sqlContext.implicits._

// Sentiment140 training corpus
val trainFile = "s3://someBucket/training.1600000.processed.noemoticon.csv"
val swFile = "s3://someBucket/stopwords.txt"
val tr = sc.textFile(trainFile)

// Stop-word list plus "rt" (retweet marker, near-universal in tweets).
val stopwords: Array[String] =
  sc.textFile(swFile).flatMap(_.stripMargin.split("\\s+")).collect ++ Array("rt")

// Shared parser for the Sentiment140 CSV layout: quoted comma-separated
// fields, polarity in column 0 (0 = negative, 2 = neutral, 4 = positive),
// tweet text in column 5. Rows with any empty field and all neutral rows
// are dropped; polarity is rescaled to {0.0, 1.0} for use as a binary label.
def parseSentiment140(raw: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[(Double, String)] =
  raw.filter(_.contains("\",\""))
    .map(_.split("\",\"").map(_.replace("\"", "")))
    .filter(row => row.forall(_.nonEmpty))
    .map(row => (row(0).toDouble, row(5)))
    .filter(row => row._1 != 2)
    .map(row => (row._1 / 4, row._2))

val parsed = parseSentiment140(tr)
val pDF = parsed.toDF("label","tweet")

val tokenizer = new RegexTokenizer().setGaps(false).setPattern("\\p{L}+").setInputCol("tweet").setOutputCol("words")
val filterer = new StopWordsRemover().setStopWords(stopwords).setCaseSensitive(false).setInputCol("words").setOutputCol("filtered")
val countVectorizer = new CountVectorizer().setInputCol("filtered").setOutputCol("features")
// NOTE(review): this is an org.apache.spark.ml estimator, so it is a valid
// Pipeline stage. The mllib NaiveBayes imported above (L17 of the file) is
// NOT a PipelineStage — to swap in Naive Bayes here, use
// org.apache.spark.ml.classification.NaiveBayes instead.
val lr = new LogisticRegression().setMaxIter(50).setRegParam(0.2).setElasticNetParam(0.0)
val pipeline = new Pipeline().setStages(Array(tokenizer, filterer, countVectorizer, lr))
val lrModel = pipeline.fit(pDF)

// Now model is made. Lets get some test data...
val testFile = "s3://someBucket/testdata.manual.2009.06.14.csv"
val te = sc.textFile(testFile)
val teparsed = parseSentiment140(te)
val teDF = teparsed.toDF("label","tweet")

val res = lrModel.transform(teDF)
// BinaryClassificationMetrics expects (prediction, label) pairs.
val restup = res.select("label","prediction").rdd.map(r => (r(1).asInstanceOf[Double], r(0).asInstanceOf[Double]))
val metrics = new BinaryClassificationMetrics(restup)
metrics.areaUnderROC()
在咨询MLlib PipelineStage列表中的API文档时,逻辑回归和Naive Bayes都列为子类。那么为什么LR工作而不是NB?
答案 0（得分：3）：