I have a Spring web application. It maps an Education model to a Lucene index via Hibernate Search:
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;

import org.hibernate.search.annotations.Boost;
import org.hibernate.search.annotations.Field;
import org.hibernate.search.annotations.Indexed;
import org.hibernate.search.annotations.TermVector;

@Entity
@Table(name = "educations")
@Indexed
public class Education {

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    private long id;

    @Column(name = "name")
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    @Boost(value = 1.5f)
    private String name;

    @Column(name = "local_name")
    private String localName;

    @Column(name = "description", columnDefinition = "TEXT")
    @Field(termVector = TermVector.WITH_POSITION_OFFSETS)
    private String description;

    // getters and setters omitted
}
This works great!

Now I'm trying to cluster my Lucene index with Mahout 0.9. I've got a basic k-means clustering working, but I don't know how to convert my Lucene index into Mahout vectors.

Here is my basic k-means clustering class, which works with some test data points, as shown below:
package com.courseportal.project.utils.lsh.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.classify.WeightedPropertyVectorWritable;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.clustering.kmeans.Kluster;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SimpleKMeansClustering {

    public static final double[][] points = {
            {1, 1}, {2, 1}, {1, 2},
            {2, 2}, {3, 3}, {8, 8},
            {9, 8}, {8, 9}, {9, 9}};

    // Writes the input vectors to a Hadoop SequenceFile so KMeansDriver can read them.
    public static void writePointsToFile(List<Vector> points,
                                         String fileName,
                                         FileSystem fs,
                                         Configuration conf) throws IOException {
        Path path = new Path(fileName);
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
                path, LongWritable.class, VectorWritable.class);
        long recNum = 0;
        VectorWritable vec = new VectorWritable();
        for (Vector point : points) {
            vec.set(point);
            writer.append(new LongWritable(recNum++), vec);
        }
        writer.close();
    }

    // Wraps the raw 2D test points in Mahout vectors.
    public static List<Vector> getPoints(double[][] raw) {
        List<Vector> points = new ArrayList<Vector>();
        for (int i = 0; i < raw.length; i++) {
            double[] fr = raw[i];
            Vector vec = new RandomAccessSparseVector(fr.length);
            vec.assign(fr);
            points.add(vec);
        }
        return points;
    }

    public static void main(String args[]) throws Exception {
        int k = 2;
        List<Vector> vectors = getPoints(points);

        File testData = new File("clustering/testdata");
        if (!testData.exists()) {
            testData.mkdir();
        }
        testData = new File("clustering/testdata/points");
        if (!testData.exists()) {
            testData.mkdir();
        }

        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        writePointsToFile(vectors, "clustering/testdata/points/file1", fs, conf);

        // Seed the initial k cluster centers from the first k points.
        Path path = new Path("clustering/testdata/clusters/part-00000");
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path, Text.class, Kluster.class);
        for (int i = 0; i < k; i++) {
            Vector vec = vectors.get(i);
            Kluster cluster = new Kluster(vec, i, new EuclideanDistanceMeasure());
            writer.append(new Text(cluster.getIdentifier()), cluster);
        }
        writer.close();

        // Run k-means: convergence delta 0.001, max 10 iterations, sequential execution.
        KMeansDriver.run(conf,
                new Path("clustering/testdata/points"),
                new Path("clustering/testdata/clusters"),
                new Path("clustering/output"),
                0.001,
                10,
                true,
                0,
                true);

        // Print each clustered point together with its cluster id.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs,
                new Path("clustering/output/" + Cluster.CLUSTERED_POINTS_DIR + "/part-m-0"), conf);
        IntWritable key = new IntWritable();
        WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
        while (reader.next(key, value)) {
            System.out.println(value.toString() + " belongs to cluster " + key.toString());
        }
        reader.close();
    }
}
I've read (here) that I'm supposed to use LuceneIndexToSequenceFiles to do this, but I can't find that class in Mahout 0.9. Is this something I have to pull in manually?

How do I convert my index so I can use it with my k-means clustering class?
Answer 0 (score: 2)
You can use the package org.apache.mahout.text and the class SequenceFilesFromLuceneStorageMRJob (for a distributed conversion) or the class SequenceFilesFromLuceneStorageDriver.
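For the sequential route, a minimal sketch of the programmatic API (the same one the second answer below uses end to end; the class name LuceneToSeq, the index path, and the field names here are illustrative, not fixed):

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.mahout.text.LuceneStorageConfiguration;
import org.apache.mahout.text.SequenceFilesFromLuceneStorage;

public class LuceneToSeq {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Describe which index to read and which stored fields to copy into the sequence files.
        LuceneStorageConfiguration luceneConf = new LuceneStorageConfiguration(conf,
                Arrays.asList(new Path("lucene/indexes/educations")), // Lucene index directories
                new Path("clustering/sequencefiles"),                 // SequenceFile output path
                "id",                                                 // stored id field
                Arrays.asList("name", "description"));                // stored text fields
        new SequenceFilesFromLuceneStorage().run(luceneConf);
    }
}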
You can find more information about their usage in the mahout-0.9 tests, for example:
mahout-0.9/integration/src/test/java/org/apache/mahout/text/SequenceFilesFromLuceneStorageDriverTest.java
mahout-0.9/integration/src/test/java/org/apache/mahout/text/SequenceFilesFromLuceneStorageMRJob.java
and https://mahout.apache.org/users/basics/creating-vectors-from-text.html
Important note: the Lucene index must be created with the same version of Lucene as the one used by Mahout.
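A quick way to verify which Lucene version actually ends up on the classpath in each application is to print Lucene's own version constant. A sketch, assuming the Lucene 4.x line that Mahout 0.9 builds against (Constants.LUCENE_MAIN_VERSION exists in 4.x; it was removed in later Lucene releases):

import org.apache.lucene.util.Constants;

public class LuceneVersionCheck {
    public static void main(String[] args) {
        // Run this from both the web app and the Mahout job; the two versions must match.
        System.out.println("Lucene version: " + Constants.LUCENE_MAIN_VERSION);
    }
}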
Answer 1 (score: 1)
To elaborate on the answers in this thread, and perhaps help someone in the future, this is how I ended up doing it.

First I had to put store = Store.YES on my Hibernate Search fields, and, as mentioned above, make sure that Hibernate Search and Mahout use the same Lucene version.
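For illustration, the name field from the question would then look something like this inside the Education entity (a sketch against the Hibernate Search annotations used above):

// Store.YES keeps the raw field value in the index so Mahout can read the text back out.
@Field(store = Store.YES, termVector = TermVector.WITH_POSITION_OFFSETS)
@Boost(value = 1.5f)
private String name;

Here is my current pom.xml: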
<!-- **********************************************************************
** DEPENDENCIES FOR MACHINE LEARNING **
********************************************************************** -->
<dependency>
<groupId>org.hibernate</groupId>
<artifactId>hibernate-search</artifactId>
<version>5.0.0.Alpha2</version>
</dependency>
<dependency>
<groupId>org.apache.mahout</groupId>
<artifactId>mahout-core</artifactId>
<version>0.9</version>
</dependency>
<dependency>
<groupId>org.apache.mahout</groupId>
<artifactId>mahout-integration</artifactId>
<version>0.9</version>
</dependency>
<dependency>
<groupId>org.apache.mahout</groupId>
<artifactId>mahout-utils</artifactId>
<version>0.5</version>
</dependency>
Then I created the sequence files, generated the sparse vectors, and ran the clustering with the following code. It still needs to be optimized.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.canopy.CanopyDriver;
import org.apache.mahout.clustering.classify.WeightedPropertyVectorWritable;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.common.distance.TanimotoDistanceMeasure;
import org.apache.mahout.text.LuceneStorageConfiguration;
import org.apache.mahout.text.SequenceFilesFromLuceneStorage;
import org.apache.mahout.vectorizer.SparseVectorsFromSequenceFiles;
import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class SimpleKMeansClustering {

    public static void main(String args[]) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path indexFilesPath = new Path("lucene/indexes/educations");
        Path sequenceFilesPath = new Path("clustering/testdata/sequencefiles/");
        Path sparseVectorsPath = new Path("clustering/testdata/sparsevectors/");
        Path tfVectorsPath = new Path("clustering/testdata/sparsevectors/tf-vectors");
        Path inputClustersPath = new Path("clustering/testdata/input-clusters");
        Path finishedInputClustersPath = new Path("clustering/testdata/input-clusters/clusters-0-final");
        Path finalClustersPath = new Path("clustering/output");

        // Create sequence files from the Lucene index.
        LuceneStorageConfiguration luceneStorageConf = new LuceneStorageConfiguration(conf,
                Arrays.asList(indexFilesPath), sequenceFilesPath, "id",
                Arrays.asList("name", "description"));
        SequenceFilesFromLuceneStorage sequenceFilefromLuceneStorage = new SequenceFilesFromLuceneStorage();
        sequenceFilefromLuceneStorage.run(luceneStorageConf);

        // Generate sparse vectors from the sequence files.
        generateSparseVectors(true,
                true,
                true,
                5,
                4,
                sequenceFilesPath,
                sparseVectorsPath);

        // Generate input clusters for k-means (instead of having k randomly initialized).
        TanimotoDistanceMeasure tanimoDistance = new TanimotoDistanceMeasure();
        CanopyDriver.run(tfVectorsPath,
                inputClustersPath,
                tanimoDistance,
                (float) 3.1,
                (float) 2.1,
                false,
                (float) 0.2,
                true);

        // Generate the k-means clusters.
        KMeansDriver.run(conf,
                tfVectorsPath,
                finishedInputClustersPath,
                finalClustersPath,
                0.001,
                10,
                true,
                0,
                true);

        // Read the clusters back and print them to the console.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs,
                new Path("clustering/output/" + Cluster.CLUSTERED_POINTS_DIR + "/part-m-0"),
                conf);
        IntWritable key = new IntWritable();
        WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
        while (reader.next(key, value)) {
            System.out.println(value.toString() + " belongs to cluster " + key.toString());
        }
        reader.close();
    }

    // Builds the command-line arguments for SparseVectorsFromSequenceFiles and runs it.
    public static void generateSparseVectors(boolean tfWeighting, boolean sequential, boolean named,
                                             double maxDFSigma, int numDocs,
                                             Path inputPath, Path outputPath) throws Exception {
        List<String> argList = Lists.newLinkedList();
        argList.add("-i");
        argList.add(inputPath.toString());
        argList.add("-o");
        argList.add(outputPath.toString());
        if (sequential) {
            argList.add("-seq");
        }
        if (named) {
            argList.add("-nv");
        }
        if (maxDFSigma >= 0) {
            argList.add("--maxDFSigma");
            argList.add(String.valueOf(maxDFSigma));
        }
        if (tfWeighting) {
            argList.add("--weight");
            argList.add("tf");
        }
        String[] args = argList.toArray(new String[argList.size()]);
        ToolRunner.run(new SparseVectorsFromSequenceFiles(), args);
    }
}
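As a follow-up, mahout-integration also ships a ClusterDumper that prints a more readable summary (top terms per cluster) than the raw WeightedPropertyVectorWritable output. A sketch, assuming the output paths used above and that the final k-means iteration directory is clusters-10-final (check which clusters-*-final directory was actually written; the class name DumpClusters is illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.mahout.utils.clustering.ClusterDumper;

public class DumpClusters {
    public static void main(String[] args) throws Exception {
        ClusterDumper dumper = new ClusterDumper(
                new Path("clustering/output/clusters-10-final"), // final cluster centers (assumed dir name)
                new Path("clustering/output/clusteredPoints"));  // point-to-cluster assignments
        dumper.printClusters(null); // pass a term dictionary instead of null to print actual words
    }
}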