Spark: plotting a model's learning curve with Spark

Asked: 2018-06-26 08:41:25

Tags: scala apache-spark machine-learning

I am using Spark and I would like to train a machine learning model.

Because of poor results, I would like to display the error made by the model at each epoch of the training (on the training and test datasets).

I will then use this information to determine whether my model is underfitting or overfitting the data.

Question: How can I plot the learning curve of a model with Spark?

In the following example, I implemented my own evaluator and overrode the evaluate method to print the metrics I need, but only two values are displayed (maxIter = 1000).

MinimalRunnableCode.scala:

import org.apache.spark.SparkConf
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.sql.SparkSession

object Min extends App {

  // Open spark session.
  val conf = new SparkConf()
    .setMaster("local")
    .set("spark.network.timeout", "800")

  val ss = SparkSession.builder
    .config(conf)
    .getOrCreate

  // Load data.
  val data = ss.createDataFrame(ss.sparkContext.parallelize(
      List(
        (Vectors.dense(1, 2), 1),
        (Vectors.dense(1, 3), 2),
        (Vectors.dense(1, 2), 1),
        (Vectors.dense(1, 3), 2),
        (Vectors.dense(1, 2), 1),
        (Vectors.dense(1, 3), 2),
        (Vectors.dense(1, 2), 1),
        (Vectors.dense(1, 3), 2),
        (Vectors.dense(1, 2), 1),
        (Vectors.dense(1, 3), 2),
        (Vectors.dense(1, 4), 3)
      )
    ))
    .withColumnRenamed("_1", "features")
    .withColumnRenamed("_2", "label")

  val Array(training, test) = data.randomSplit(Array(0.8, 0.2), seed = 42)

  // Create model of linear regression.
  val lr = new LinearRegression().setMaxIter(1000)

  // Create parameters grid that will be used to train different version of the linear model.
  val paramGrid = new ParamGridBuilder()
    .addGrid(lr.regParam, Array(0.001))
    .addGrid(lr.fitIntercept)
    .addGrid(lr.elasticNetParam, Array(0.5))
    .build()

  // Create trainer using validation split to evaluate which set of parameters performs the best.
  val trainValidationSplit = new TrainValidationSplit()
    .setEstimator(lr)
    .setEvaluator(new CustomRegressionEvaluator)
    .setEstimatorParamMaps(paramGrid)
    .setTrainRatio(0.8) // 80% of the data will be used for training and the remaining 20% for validation.

  // Run train validation split, and choose the best set of parameters.
  var model = trainValidationSplit.fit(training)

  // Close spark session.
  ss.stop()
}

CustomRegressionEvaluator.scala:

import org.apache.spark.ml.evaluation.Evaluator
import org.apache.spark.ml.param.{Param, ParamMap, Params}
import org.apache.spark.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable}
import org.apache.spark.mllib.evaluation.RegressionMetrics
import org.apache.spark.sql.{Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._

final class CustomRegressionEvaluator (override val uid: String) extends Evaluator with HasPredictionCol with HasLabelCol with DefaultParamsWritable {

  def this() = this(Identifiable.randomUID("regEval"))

  def checkNumericType(
                        schema: StructType,
                        colName: String,
                        msg: String = ""): Unit = {
    val actualDataType = schema(colName).dataType
    val message = if (msg != null && msg.trim.length > 0) " " + msg else ""
    require(actualDataType.isInstanceOf[NumericType], s"Column $colName must be of type " +
      s"NumericType but was actually of type $actualDataType.$message")
  }

  def checkColumnTypes(
                        schema: StructType,
                        colName: String,
                        dataTypes: Seq[DataType],
                        msg: String = ""): Unit = {
    val actualDataType = schema(colName).dataType
    val message = if (msg != null && msg.trim.length > 0) " " + msg else ""
    require(dataTypes.exists(actualDataType.equals),
      s"Column $colName must be of type equal to one of the following types: " +
        s"${dataTypes.mkString("[", ", ", "]")} but was actually of type $actualDataType.$message")
  }

  var i = 0 // counts the number of times the evaluate method is called
  override def evaluate(dataset: Dataset[_]): Double = {
    val schema = dataset.schema
    checkColumnTypes(schema, $(predictionCol), Seq(DoubleType, FloatType))
    checkNumericType(schema, $(labelCol))

    val predictionAndLabels = dataset
      .select(col($(predictionCol)).cast(DoubleType), col($(labelCol)).cast(DoubleType))
      .rdd
      .map { case Row(prediction: Double, label: Double) => (prediction, label) }
    val metrics = new RegressionMetrics(predictionAndLabels)
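    // The metric is hard-coded to "mae" here; change the selector below to pick rmse, mse, or r2.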
    val metric = "mae" match {
      case "rmse" => metrics.rootMeanSquaredError
      case "mse" => metrics.meanSquaredError
      case "r2" => metrics.r2
      case "mae" => metrics.meanAbsoluteError
    }
    println(s"$i $metric") // Print the metrics
    i = i + 1 // Update counter
    metric
  }

  // MAE is an error metric, so smaller values are better; without this
  // override, TrainValidationSplit would select the worst-performing model.
  override def isLargerBetter: Boolean = false

  override def copy(extra: ParamMap): CustomRegressionEvaluator = defaultCopy(extra)
}

object CustomRegressionEvaluator extends DefaultParamsReadable[CustomRegressionEvaluator] {

  override def load(path: String): CustomRegressionEvaluator = super.load(path)
}

private[ml] trait HasPredictionCol extends Params {

  /**
    * Param for prediction column name.
    * @group param
    */
  final val predictionCol: Param[String] = new Param[String](this, "predictionCol", "prediction column name")

  setDefault(predictionCol, "prediction")

  /** @group getParam */
  final def getPredictionCol: String = $(predictionCol)
}

private[ml] trait HasLabelCol extends Params {

  /**
    * Param for label column name.
    * @group param
    */
  final val labelCol: Param[String] = new Param[String](this, "labelCol", "label column name")

  setDefault(labelCol, "label")

  /** @group getParam */
  final def getLabelCol: String = $(labelCol)
}

1 Answer:

Answer 0 (score: 5):

Here is a possible solution for the specific case of LinearRegression, and of any other algorithm that supports an objective history (in this case, LinearRegressionTrainingSummary does the job). Incidentally, this also explains what you observed: TrainValidationSplit calls the evaluator once per parameter combination (your grid produces two, fitIntercept = true and false), not once per training iteration, so a custom evaluator can only ever print two values there; the per-iteration loss has to come from the training summary instead.

First, let's create a minimal, complete, and verifiable example:

import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.mllib.util.{LinearDataGenerator, MLUtils}
import org.apache.spark.sql.SparkSession

val spark: SparkSession = SparkSession.builder().getOrCreate()

import org.apache.spark.ml.evaluation.RegressionEvaluator
import spark.implicits._

val data = {
  val tmp = LinearDataGenerator.generateLinearRDD(
    spark.sparkContext,
    nexamples = 10000,
    nfeatures = 4,
    eps = 0.05
  ).toDF

  MLUtils.convertVectorColumnsToML(tmp, "features")
}

As you may have noticed, using a generator is the recommended way to produce data for testing purposes with spark-mllib or spark-ml.
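
As a quick sanity check, you can inspect the generated DataFrame; the schema shown in the comment is indicative (nullability flags may differ between Spark versions):

data.printSchema()
// root
//  |-- label: double (nullable = false)
//  |-- features: vector (nullable = true)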

Now, let's train a linear regressor:

// Create model of linear regression.
val lr = new LinearRegression().setMaxIter(1000)

// The following grid will create two sets of parameters
// (fitIntercept = true and fitIntercept = false).
val paramGrid = new ParamGridBuilder()
  .addGrid(lr.regParam, Array(0.001))
  .addGrid(lr.fitIntercept)
  .addGrid(lr.elasticNetParam, Array(0.5))
  .build()

// Create trainer using validation split to evaluate which set of parameters performs the best.
// I'm using the regular RegressionEvaluator here
val trainValidationSplit = new TrainValidationSplit()
  .setEstimator(lr)
  .setEvaluator(new RegressionEvaluator)
  .setEstimatorParamMaps(paramGrid)
  .setTrainRatio(0.8) // 80% of the data will be used for training and the remaining 20% for validation.

// To retrieve subModels, make sure to set collectSubModels to true before fitting (available since Spark 2.3).
trainValidationSplit.setCollectSubModels(true)
// Run train validation split, and choose the best set of parameters.
var model = trainValidationSplit.fit(data)
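
As a side note, if you only care about the best model rather than every sub-model, its training summary is reachable directly. A small sketch, assuming the best model is a LinearRegressionModel:

val best = model.bestModel.asInstanceOf[LinearRegressionModel]
// objectiveHistory holds one loss value per iteration of the solver.
println(best.summary.objectiveHistory.mkString(", "))
println(s"total iterations: ${best.summary.totalIterations}")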

Now that our model is trained, all we need is to get the objective history.

The following part requires a bit of gymnastics between the parameters of the model and of the sub-models.

If you have a Pipeline or the like, this code will need to be modified, so use it carefully. It's just an example:

val objectiveHist = spark.sparkContext.parallelize(
  model.subModels.zip(model.getEstimatorParamMaps).map {
    case (m: LinearRegressionModel, pm: ParamMap) =>
      val history: Array[Double] = m.summary.objectiveHistory
      val idx: Seq[Int] = 1 until history.length
      // regParam, elasticNetParam, fitIntercept
      val parameters = pm.toSeq.map(pair => (pair.param.name, pair.value.toString)) match {
        case Seq(x, y, z) => (x._2, y._2, z._2)
      }
      (parameters._1, parameters._2, parameters._3, idx.zip(history).toMap)
  }).toDF("regParam", "elasticNetParam", "fitIntercept", "objectiveHistory")

We can now inspect those metrics:

objectiveHist.show(false)
// +--------+---------------+------------+-------------------------------------------------------------------------------------------------------+
// |regParam|elasticNetParam|fitIntercept|objectiveHistory                                                                                       |
// +--------+---------------+------------+-------------------------------------------------------------------------------------------------------+
// |0.001   |0.5            |true        |[1 -> 0.4999999999999999, 2 -> 0.4038796441909531, 3 -> 0.02659222058006269, 4 -> 0.026592220340980147]|
// |0.001   |0.5            |false       |[1 -> 0.5000637621421942, 2 -> 0.4039303922115196, 3 -> 0.026592220673025396, 4 -> 0.02659222039347222]|
// +--------+---------------+------------+-------------------------------------------------------------------------------------------------------+

You can notice that the training process actually stopped after 4 iterations (the solver converged before reaching maxIter = 1000).
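
Since the question is about plotting, here is a minimal sketch (assuming the objectiveHist DataFrame built above) that flattens the iteration -> loss map into one row per iteration, a shape that is easy to dump to CSV and plot with whatever tool you like (the output path is just an example):

import org.apache.spark.sql.functions.explode

// Exploding a map column yields "key"/"value" columns; rename them so that
// each row is one point (iteration, loss) of the learning curve.
val curve = objectiveHist
  .select($"regParam", $"elasticNetParam", $"fitIntercept", explode($"objectiveHistory"))
  .withColumnRenamed("key", "iteration")
  .withColumnRenamed("value", "loss")
  .orderBy($"fitIntercept", $"iteration")

curve.coalesce(1).write.option("header", "true").csv("/tmp/learning_curve")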

If you only want the number of iterations, you can do the following instead:

val objectiveHist2 = spark.sparkContext.parallelize(
  model.subModels.zip(model.getEstimatorParamMaps).map {
    case (m: LinearRegressionModel, pm: ParamMap) =>
      val history: Array[Double] = m.summary.objectiveHistory
      // regParam, elasticNetParam, fitIntercept
      val parameters = pm.toSeq.map(pair => (pair.param.name, pair.value.toString)) match {
        case Seq(x, y, z) => (x._2, y._2, z._2)
      }
      (parameters._1, parameters._2, parameters._3, history.size)
  }).toDF("regParam", "elasticNetParam", "fitIntercept", "iterations")

For demonstration purposes, I changed the number of features in the generator (nfeatures = 100):

objectiveHist2.show
// +--------+---------------+------------+----------+
// |regParam|elasticNetParam|fitIntercept|iterations|
// +--------+---------------+------------+----------+
// |   0.001|            0.5|        true|        11|
// |   0.001|            0.5|       false|        11|
// +--------+---------------+------------+----------+
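
And if you prefer to plot on the driver side, a last sketch (same assumptions as above) that collects the raw histories; they are tiny arrays, so collecting them is cheap:

// One objective history per sub-model, in the order of the parameter grid.
val histories: Array[Array[Double]] = model.subModels.collect {
  case m: LinearRegressionModel => m.summary.objectiveHistory
}
histories.zipWithIndex.foreach { case (h, i) =>
  println(s"sub-model $i: ${h.mkString(", ")}")
}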