线程" main"中的例外情况java.lang.IllegalArgumentException:Field" features"不存在

时间:2017-06-13 05:13:01

标签: apache-spark apache-spark-ml

Exception in thread "main" java.lang.IllegalArgumentException: Field "features" does not exist.
    at org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:264)
    at org.apache.spark.sql.types.StructType$$anonfun$apply$1.apply(StructType.scala:264)
    at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
    at scala.collection.AbstractMap.getOrElse(Map.scala:59)
    at org.apache.spark.sql.types.StructType.apply(StructType.scala:263)
    at org.apache.spark.ml.util.SchemaUtils$.checkColumnType(SchemaUtils.scala:40)
    at org.apache.spark.ml.clustering.KMeansParams$class.validateAndTransformSchema(KMeans.scala:92)
    at org.apache.spark.ml.clustering.KMeans.validateAndTransformSchema(KMeans.scala:253)
    at org.apache.spark.ml.clustering.KMeans.transformSchema(KMeans.scala:330)
    at org.apache.spark.ml.PipelineStage.transformSchema(Pipeline.scala:74)
    at org.apache.spark.ml.clustering.KMeans.fit(KMeans.scala:304)
    at sparkExample.spExample.ClusteringDSPOC.main(ClusteringDSPOC.java:45)

My code is:

package sparkExample.spExample;

import java.util.Properties;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.ml.clustering.KMeans;
import org.apache.spark.ml.clustering.KMeansModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class ClusteringDSPOC {

    private static final Pattern SPACE = Pattern.compile(" ");
    private static final SparkContext sc = new SparkContext(new SparkConf().setAppName("SparkJdbcDs").setMaster("local[*]"));
    private static final String POSTGRESQL_DRIVER = "org.postgresql.Driver";
    private static final String POSTGRESQL_USERNAME = "xyz";
    private static final String POSTGRESQL_PWD = "xyz";
    private static final String POSTGRESQL_CONNECTION_URL = "jdbc:postgresql://192.168.111.130:5432/xyzdb?user=" + POSTGRESQL_USERNAME + "&password=" + POSTGRESQL_PWD;
    private static final String POSTGRESQL_TABLE = "(select id, duration from abc where duration is not null ) as abc";

    public static void main(String[] args) throws Exception {
        //Datasource options
        SparkSession spark = SparkSession.builder().appName("JavaKMeansExample").getOrCreate();
        Class.forName(POSTGRESQL_DRIVER);  
        Properties options = new Properties();
        Dataset<Row> sdrDS = spark.read().format("libsvm").jdbc(POSTGRESQL_CONNECTION_URL, POSTGRESQL_TABLE, options);

        Dataset<Row> durationDS = sdrDS.select("duration");


        KMeans kmeans = new KMeans().setK(2).setSeed(1L);
        KMeansModel model = kmeans.fit(durationDS);
    }
}

I am following this guide: https://spark.apache.org/docs/latest/ml-clustering.html

This error is thrown when the fit method is called. Please help me fix it, or suggest another way to do this. Thanks.

Here I am trying to split the durations into 2 or 3 clusters and then map the clusters back to the ids. I am able to do this with the Spark MLlib library like this:

package sparkExample.spExample;

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.clustering.KMeans;
import org.apache.spark.mllib.clustering.KMeansModel;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;

public class ClusteringPOC1 {

    private static final Pattern SPACE = Pattern.compile(" ");
    private static final JavaSparkContext sc = new JavaSparkContext(new SparkConf().setAppName("SparkJdbcDs").setMaster("local[*]"));
    private static final String POSTGRESQL_DRIVER = "org.postgresql.Driver";
    private static final String POSTGRESQL_USERNAME = "abc";
    private static final String POSTGRESQL_PWD = "abc";
    private static final String POSTGRESQL_CONNECTION_URL = "jdbc:postgresql://192.168.111.130:5432/abcdb?user=" + POSTGRESQL_USERNAME + "&password=" + POSTGRESQL_PWD;

    private static final SQLContext sqlContext = new SQLContext(sc);
    public static void main(String[] args) throws Exception {
        //Datasource options
        Map<String, String> options = new HashMap<String, String>();
        options.put("driver", POSTGRESQL_DRIVER);
        options.put("url", POSTGRESQL_CONNECTION_URL);
        options.put("dbtable", "(select id, duration from sdr_log where duration is not null ) as sdr_log");
        Dataset<Row> sdrDF = sqlContext.read().format("jdbc").options(options).load();
        JavaRDD<Row> sdrData = sdrDF.toJavaRDD();
        sdrData.cache();
        // The query selects (id, duration), so id is column 0 and duration is column 1.
        JavaRDD<Vector> durationData = sdrData.map(row -> {
            double value = Double.parseDouble(row.get(1).toString());
            return Vectors.dense(value);
        });
        durationData.cache();

        int numClusters = 2;    // example values; adjust as needed
        int numIterations = 20;
        KMeansModel clusters = KMeans.train(durationData.rdd(), numClusters, numIterations);
        JavaRDD<Integer> clusterLabel = clusters.predict(durationData);
        JavaRDD<Long> id = sdrData.map(row -> Long.parseLong(row.get(0).toString()));
        JavaPairRDD<Long, Integer> clusterLabelData = id.zip(clusterLabel);
        clusterLabelData.saveAsTextFile("data/mlib/kmeans_output11.txt");

    }
}

But I want to do this with the Spark ML library.

1 Answer:

Answer 0 (score: 0)

K-means is an unsupervised clustering algorithm that tries to partition a set of points into K sets (clusters) such that the points in each cluster tend to be close to one another.

    Dataset<Row> durationDS = sdrDS.select("duration");

In your code, you are iterating over the rows while selecting a single column, 'duration', and you have set the number of clusters to 2. But without any basis, how is the data supposed to be classified into clusters?

The essence of an unsupervised learning algorithm (K-means in this case) is that you do not need to specify parameters tied to the logic of the dataset. You simply pass (fit) the dataset into the model, and it is then classified into clusters.

In the K-means algorithm, the model tries to group the points around K cluster centers. It needs the feature data in a form it can consume, and you are passing only a single raw column.
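
Concretely, the error in the stack trace comes from the input schema: spark.ml's KMeans reads its features from a Vector-typed column named "features" by default (its featuresCol parameter), and a plain JDBC read only produces the raw id and duration columns. A minimal sketch of building that column with VectorAssembler, assuming sdrDS is the DataFrame read via JDBC (without the libsvm format) in the question:

    import org.apache.spark.ml.feature.VectorAssembler;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;

    // KMeans.fit() validates that a Vector column named "features" exists;
    // that check is exactly what throws 'Field "features" does not exist'.
    VectorAssembler assembler = new VectorAssembler()
            .setInputCols(new String[] {"duration"})  // numeric input column(s)
            .setOutputCol("features");                // matches KMeans' default featuresCol
    Dataset<Row> withFeatures = assembler.transform(sdrDS);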

It is better to use Spark's DataFrame API to resolve the error you are facing. Spark automatically reads the schema from the database table and maps its types back to Spark SQL types.

Load the DataFrame:

> Dataset<Row> jdbcDF = sqlContext.read().jdbc(POSTGRESQL_CONNECTION_URL, POSTGRESQL_TABLE, options);

Now you can use jdbcDF.drop("columnName") to remove any columns you do not want, and/or fit the dataset this way:

> KMeansModel model = kmeans.fit(jdbcDF);
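
Note, however, that fitting the raw JDBC DataFrame will still fail until the "features" column has been assembled. Putting the pieces together, here is a minimal end-to-end sketch (reusing the connection URL and query from the question; the class name and the VectorAssembler step are my additions, not the original code):

    package sparkExample.spExample;

    import java.util.Properties;

    import org.apache.spark.ml.clustering.KMeans;
    import org.apache.spark.ml.clustering.KMeansModel;
    import org.apache.spark.ml.feature.VectorAssembler;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    public class ClusteringDSPOCFixed {

        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder()
                    .appName("JavaKMeansExample")
                    .master("local[*]")
                    .getOrCreate();

            Properties props = new Properties();
            props.setProperty("driver", "org.postgresql.Driver");

            // Plain JDBC read; .format("libsvm") is only for LIBSVM text
            // files and must not be combined with jdbc().
            Dataset<Row> sdrDS = spark.read().jdbc(
                    "jdbc:postgresql://192.168.111.130:5432/xyzdb?user=xyz&password=xyz",
                    "(select id, duration from abc where duration is not null) as abc",
                    props);

            // Build the Vector column that KMeans expects.
            Dataset<Row> withFeatures = new VectorAssembler()
                    .setInputCols(new String[] {"duration"})
                    .setOutputCol("features")
                    .transform(sdrDS);

            KMeans kmeans = new KMeans().setK(2).setSeed(1L);
            KMeansModel model = kmeans.fit(withFeatures);

            // transform() appends a "prediction" column, so each id can be
            // mapped to its cluster directly.
            model.transform(withFeatures).select("id", "prediction").show();
        }
    }

Since transform() keeps the id column alongside the prediction, this also gives the id-to-cluster mapping that the question builds by zipping RDDs in the MLlib version.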

Also, it would be great if you could provide the dataset.