I am using GraphX and Pregel with the Java API. I am trying to implement a MaxValue algorithm (given a weighted graph, the output is the maximum weight), but my implementation does not work:
import java.util.ArrayList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.graphx.Edge;
import org.apache.spark.graphx.EdgeDirection;
import org.apache.spark.graphx.Graph;
import org.apache.spark.graphx.GraphOps;
import org.apache.spark.rdd.RDD;
import org.apache.spark.storage.StorageLevel;

import scala.Tuple2;
import scala.reflect.ClassTag$;

public class Main {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("MaxValue").setMaster("spark://home:7077");
        JavaSparkContext sc = new JavaSparkContext(conf);

        JavaRDD<String> text_file = sc.textFile(args[0]);
        JavaRDD<String[]> text_file_arr = text_file.map(l -> l.split(" "));
        // cache, because the token arrays are reused for both the vertex and the edge RDD
        text_file_arr.cache();

        // create the vertex RDD: (nodeID, value), the value being the last token of the line
        RDD<Tuple2<Object, Integer>> vertices = text_file_arr.map(
                t -> new Tuple2<>((Object) Long.parseLong(t[0]), Integer.parseInt(t[t.length - 1]))
        ).rdd();

        // create the edge RDD: one edge from the node to each neighbor (tokens 1 .. length - 2)
        RDD<Edge<Boolean>> edges = text_file_arr
                .flatMap(l -> {
                    List<Edge<Boolean>> edgeList = new ArrayList<>();
                    long src = Long.parseLong(l[0]);
                    for (int i = 1; i < l.length - 1; ++i) {
                        edgeList.add(new Edge<>(src, Long.parseLong(l[i]), true));
                    }
                    return edgeList.iterator();
                })
                .rdd();

        // create the graph
        Graph<Integer, Boolean> graph = Graph.apply(
                vertices,
                edges,
                Integer.MIN_VALUE,
                StorageLevel.MEMORY_AND_DISK(),
                StorageLevel.MEMORY_AND_DISK(),
                ClassTag$.MODULE$.apply(Integer.class),
                ClassTag$.MODULE$.apply(Boolean.class)
        );
        graph.edges().toJavaRDD().collect().forEach(System.out::print);
        graph.vertices().toJavaRDD().collect().forEach(System.out::print);

        GraphOps<Integer, Boolean> graph_ops = new GraphOps<>(
                graph,
                ClassTag$.MODULE$.apply(Integer.class),
                ClassTag$.MODULE$.apply(Boolean.class)
        );

        // run Pregel: initial message, at most 3 iterations, messages along either edge direction
        Graph<Integer, Boolean> graph_pregel = graph_ops.pregel(
                Integer.MIN_VALUE,
                3,
                EdgeDirection.Either(),
                new VProg(),
                new SendMsg(),
                new Merge(),
                ClassTag$.MODULE$.apply(Integer.class)
        );
        graph_pregel.vertices().toJavaRDD().saveAsTextFile("out");
    }
}
Here are the classes VProg, SendMsg, and Merge:
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.spark.graphx.EdgeTriplet;

import scala.Tuple2;
import scala.collection.Iterator;
import scala.collection.JavaConverters;
import scala.runtime.AbstractFunction1;
import scala.runtime.AbstractFunction2;
import scala.runtime.AbstractFunction3;

// emits a message whenever the source value is larger than the destination value
class SendMsg extends AbstractFunction1<EdgeTriplet<Integer, Boolean>, Iterator<Tuple2<Object, Integer>>> implements Serializable {
    @Override
    public Iterator<Tuple2<Object, Integer>> apply(EdgeTriplet<Integer, Boolean> et) {
        System.out.println(et.srcId() + " ---> " + et.dstId() + " with: " + et.srcAttr() + " ---> " + et.dstAttr());
        if (et.srcAttr() > et.dstAttr()) {
            // et.toTuple()._1() is the (srcId, srcAttr) pair of the triplet
            return JavaConverters.asScalaIteratorConverter(Arrays.asList(et.toTuple()._1()).iterator()).asScala();
        } else {
            return JavaConverters.asScalaIteratorConverter(new ArrayList<Tuple2<Object, Integer>>().iterator()).asScala();
        }
    }
}

// keeps the larger of the current vertex value and the incoming message
class VProg extends AbstractFunction3<Object, Integer, Integer, Integer> implements Serializable {
    @Override
    public Integer apply(Object l, Integer treeNodeThis, Integer treeNodeIn) {
        if (treeNodeThis > treeNodeIn) {
            System.out.println(l + " : " + treeNodeThis);
            return treeNodeThis;
        } else {
            System.out.println(l + " : " + treeNodeIn);
            return treeNodeIn;
        }
    }
}

// combines two messages by keeping the larger one
class Merge extends AbstractFunction2<Integer, Integer, Integer> implements Serializable {
    @Override
    public Integer apply(Integer n1, Integer n2) {
        return (n1 > n2) ? n1 : n2;
    }
}
The problem is that after VProg has run on the nodes, SendMsg executes, but the values are never updated. That is, VProg returns the new value, yet the resulting graph is still the input graph. I have tried other algorithms as well and run into the same problem. Maybe I wrote the classes VProg, SendMsg, or Merge incorrectly?
The graph is connected, has 7 nodes, and each node's value is 2^nodenumber.
I also tried the Pregel class directly, with the same problem... I am using Spark 2.0.0 and Java 8.
Answer (score: 1):
After a lot of trial and error, I believe there is a bug in the Spark Pregel Java API. I implemented the same algorithm in Scala, and it works:
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ListBuffer

object Main {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ScalaMaxValue").setMaster("spark://home:7077")
    val sc = new SparkContext(conf)

    val text_file_arr: RDD[Array[String]] = sc.textFile(args(0)).map(l => l.split(" "))

    // (nodeID, value): the node ID is the first token of the line, its value the last one
    val vertices: RDD[(VertexId, Int)] = text_file_arr.map(t => (t(0).toLong, t(t.length - 1).toInt))

    // one edge from the node to each of its neighbors (tokens 1 .. length - 2)
    val edges: RDD[Edge[Boolean]] = text_file_arr.flatMap(l => {
      val src = l(0).toLong
      val edgeList = new ListBuffer[Edge[Boolean]]
      for (i <- 1 to (l.length - 2)) {
        edgeList += Edge(src, l(i).toLong, true)
      }
      edgeList.toList
    })

    val graph = Graph(vertices, edges, Int.MinValue)

    // run Pregel until no more messages are sent (maxIterations = Int.MaxValue)
    val graph_pregel = Pregel(graph, Int.MinValue, Int.MaxValue)(vProg, sendMsg, merge)

    //graph_pregel.vertices.saveAsTextFile("out")
    println(graph_pregel.vertices.collect()(0))
  }

  // keep the larger of the current vertex value and the incoming message
  def vProg(id: VertexId, act: Int, other: Int): Int = {
    if (other < act) act else other
  }

  // forward the source value to the destination vertex when it is larger
  def sendMsg(et: EdgeTriplet[Int, Boolean]): Iterator[(VertexId, Int)] = {
    if (et.srcAttr > et.dstAttr) {
      Iterator((et.dstId, et.srcAttr))
    } else {
      Iterator.empty
    }
  }

  def merge(n1: Int, n2: Int): Int = {
    if (n1 < n2) n2 else n1
  }
}
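Comparing the two sendMsg implementations may be instructive: in GraphX, EdgeTriplet.toTuple() returns ((srcId, srcAttr), (dstId, dstAttr), attr), so the Java version's et.toTuple()._1() builds the message from the (srcId, srcAttr) pair, i.e. the message is addressed back to the source vertex, whereas the Scala version sends (et.dstId, et.srcAttr) to the destination. A Java SendMsg mirroring the Scala behavior might look like the following sketch (untested; the class name SendMsgToDst is made up, and it reuses the imports of the SendMsg above plus java.util.Collections):
// Sketch (untested): addresses the message to the destination vertex,
// carrying the source value, like Iterator((et.dstId, et.srcAttr)) in Scala.
class SendMsgToDst extends AbstractFunction1<EdgeTriplet<Integer, Boolean>, Iterator<Tuple2<Object, Integer>>> implements Serializable {
    @Override
    public Iterator<Tuple2<Object, Integer>> apply(EdgeTriplet<Integer, Boolean> et) {
        if (et.srcAttr() > et.dstAttr()) {
            // (dstId, srcAttr): deliver the larger source value to the destination
            return JavaConverters.asScalaIteratorConverter(
                    Arrays.asList(new Tuple2<Object, Integer>(et.dstId(), et.srcAttr())).iterator()
            ).asScala();
        } else {
            // no message: the destination already holds a value at least as large
            return JavaConverters.asScalaIteratorConverter(
                    Collections.<Tuple2<Object, Integer>>emptyList().iterator()
            ).asScala();
        }
    }
}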
The input format is:
#nodeID #neighborID_1 ... #neighborID_N #value
. . .
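For illustration, a file in this format describing the 7-node graph from the question might look like the lines below (the adjacency is invented for the example; node i carries the value 2^i as the last token of its line):
1 2 3 2
2 1 4 4
3 1 5 8
4 2 6 16
5 3 7 32
6 4 7 64
7 5 6 128
Since the graph is connected, the Pregel run should converge with every vertex holding the maximum value, here 2^7 = 128.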