I am using Spark to consume data from Kafka and save it to Cassandra. My program is written in Java, and I am using the spark-streaming-kafka_2.10:1.6.2 library for this task. My code is:
SparkConf sparkConf = new SparkConf().setAppName("name");
JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

Map<String, String> kafkaParams = new HashMap<>();
kafkaParams.put("zookeeper.connect", "127.0.0.1");
kafkaParams.put("group.id", App.GROUP);

// topicMap (topic name -> number of receiver threads) is defined elsewhere
JavaPairReceiverInputDStream<String, EventLog> messages =
        KafkaUtils.createStream(jssc, String.class, EventLog.class, StringDecoder.class, EventLogDecoder.class,
                kafkaParams, topicMap, StorageLevel.MEMORY_AND_DISK_SER_2());

JavaDStream<EventLog> lines = messages.map(new Function<Tuple2<String, EventLog>, EventLog>() {
    @Override
    public EventLog call(Tuple2<String, EventLog> tuple2) {
        return tuple2._2();
    }
});

lines.foreachRDD(rdd -> {
    javaFunctions(rdd).writerBuilder("test", "event_log", mapToRow(EventLog.class)).saveToCassandra();
});

jssc.start();
In my Cassandra table event_log there is a column named offsetid that stores the offset id of the stream. How do I get the offset id up to which the stream has read from Kafka, and store it in Cassandra? And after it is saved in Cassandra, how can I use that latest offset id as the starting point the next time Spark starts up?
Answer 0 (score: 3)
So you want to manage the Kafka offsets yourself.
To do that:
Use createDirectStream instead of createStream. This lets you specify the offsets you want to start reading from (fromOffsets: Map[TopicAndPartition, Long]).
Collect information about which offsets you have already processed. This can be done by saving the offset with every message, or by aggregating this information in a separate table. To get the offset ranges, use the rdd: rdd.asInstanceOf[HasOffsetRanges].offsetRanges. For Java (per the documentation at http://spark.apache.org/docs/latest/streaming-kafka-integration.html): OffsetRange[] offsets = ((HasOffsetRanges) rdd.rdd()).offsetRanges(); A sketch of this pattern is shown below.
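A minimal Java sketch of that pattern, reusing the jssc from the question (the broker address and topic name are placeholders, and the loop body is where you would persist the offsets, e.g. to Cassandra):

Map<String, String> kafkaParams = new HashMap<>();
kafkaParams.put("metadata.broker.list", "127.0.0.1:9092"); // placeholder broker list
Set<String> topics = new HashSet<>(Arrays.asList("topicName")); // placeholder topic

// Direct stream: no receiver, offsets are under the application's control.
JavaPairInputDStream<String, String> stream = KafkaUtils.createDirectStream(
        jssc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        kafkaParams, topics);

stream.foreachRDD(rdd -> {
    // The cast only works on the RDD that comes straight from the direct stream.
    OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
    for (OffsetRange o : offsetRanges) {
        // Persist o.topic(), o.partition(), o.fromOffset(), o.untilOffset()
        // so the next run can resume from these offsets.
    }
    // ... process the records in rdd ...
});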
Answer 1 (score: 3)
Here is reference code that you may need to adapt to your requirements. What this code and approach do is maintain, per topic, the partition-wise Kafka offsets in Cassandra (this could also be done in ZooKeeper, as suggested, using its Java API). For every batch of string messages received, the topic's latest offset range is stored or updated in the EventLog table. So on startup the table is always checked first: if offsets exist for the topic, the direct stream is created from those offsets; otherwise a fresh direct stream is created.
package com.spark;

import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions;
import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapRowTo;
import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapToRow;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.HasOffsetRanges;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.apache.spark.streaming.kafka.OffsetRange;

import scala.Tuple2;
public class KafkaChannelFetchOffset {

    public static void main(String[] args) {
        String topicName = "topicName";
        SparkConf sparkConf = new SparkConf().setAppName("name");
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));
        HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(topicName));
        HashMap<TopicAndPartition, Long> kafkaTopicPartition = new HashMap<TopicAndPartition, Long>();

        Map<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("zookeeper.connect", "127.0.0.1");
        kafkaParams.put("group.id", "GROUP");
        kafkaParams.put("metadata.broker.list", "127.0.0.1");

        // Load the previously stored offsets for this topic from Cassandra (if any).
        List<EventLog> eventLogList = javaFunctions(jssc.sparkContext())
                .cassandraTable("test", "event_log", mapRowTo(EventLog.class))
                .select("topicName", "partition", "fromOffset", "untilOffset")
                .where("topicName=?", topicName).collect();
        JavaDStream<String> kafkaOutStream = null;
        if (eventLogList == null || eventLogList.isEmpty()) {
            // No stored offsets yet: start a fresh direct stream.
            kafkaOutStream = KafkaUtils.createDirectStream(jssc, String.class, String.class,
                    StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet)
                    .transform(new Function<JavaPairRDD<String, String>, JavaRDD<String>>() {
                        @Override
                        public JavaRDD<String> call(JavaPairRDD<String, String> pairRdd) throws Exception {
                            JavaRDD<String> rdd = pairRdd.map(new Function<Tuple2<String, String>, String>() {
                                @Override
                                public String call(Tuple2<String, String> tuple2) throws Exception {
                                    return tuple2._2;
                                }
                            });
                            // The HasOffsetRanges cast must be applied to the RDD coming straight
                            // from the direct stream (pairRdd), not to the mapped RDD.
                            writeOffset(rdd, ((HasOffsetRanges) pairRdd.rdd()).offsetRanges());
                            return rdd;
                        }
                    });
        } else {
            // Offsets found in Cassandra: resume every partition from its stored untilOffset.
            for (EventLog eventLog : eventLogList) {
                kafkaTopicPartition.put(new TopicAndPartition(topicName, Integer.parseInt(eventLog.getPartition())),
                        Long.parseLong(eventLog.getUntilOffset()));
            }
            kafkaOutStream = KafkaUtils.createDirectStream(jssc, String.class, String.class,
                    StringDecoder.class, StringDecoder.class, String.class,
                    kafkaParams, kafkaTopicPartition,
                    new Function<MessageAndMetadata<String, String>, String>() {
                        @Override
                        public String call(MessageAndMetadata<String, String> messageAndMetadata) throws Exception {
                            return messageAndMetadata.message();
                        }
                    }).transform(new Function<JavaRDD<String>, JavaRDD<String>>() {
                        @Override
                        public JavaRDD<String> call(JavaRDD<String> rdd) throws Exception {
                            writeOffset(rdd, ((HasOffsetRanges) rdd.rdd()).offsetRanges());
                            return rdd;
                        }
                    });
        }
        // Use kafkaOutStream for further processing.
        jssc.start();
        jssc.awaitTermination();
    }
    private static void writeOffset(JavaRDD<String> rdd, final OffsetRange[] offsets) {
        // Build one EventLog row per partition offset range and save the batch to Cassandra.
        List<EventLog> offsetLogs = new ArrayList<>();
        for (OffsetRange offsetRange : offsets) {
            EventLog eventLog = new EventLog();
            eventLog.setTopicName(offsetRange.topic());
            eventLog.setPartition(String.valueOf(offsetRange.partition()));
            eventLog.setFromOffset(String.valueOf(offsetRange.fromOffset()));
            eventLog.setUntilOffset(String.valueOf(offsetRange.untilOffset()));
            offsetLogs.add(eventLog);
        }
        JavaRDD<EventLog> offsetRdd = JavaSparkContext.fromSparkContext(rdd.context()).parallelize(offsetLogs);
        javaFunctions(offsetRdd).writerBuilder("test", "event_log", mapToRow(EventLog.class)).saveToCassandra();
    }
}
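Where the code above says "Use kafkaOutStream for further processing", a minimal sketch of that step could mirror the question's original pipeline. EventLog.fromString below is a hypothetical placeholder for however you turn the raw Kafka message string back into an EventLog:

        kafkaOutStream.foreachRDD(rdd -> {
            // EventLog.fromString is a placeholder for your own deserialization logic.
            JavaRDD<EventLog> events = rdd.map(message -> EventLog.fromString(message));
            javaFunctions(events).writerBuilder("test", "event_log", mapToRow(EventLog.class)).saveToCassandra();
        });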
Hope this helps solve your problem...