For the same filter condition I have several target columns to sum. HBase is the database, so I am trying to do this with Storm Trident. In any SQL dialect this is trivial under one WHERE/GROUP BY (e.g. SELECT word, SUM(c1), SUM(c2) FROM t GROUP BY word), but I cannot get Trident to do the same. The error message reads, roughly translated, "an unexpected delegate handed to the combined list". A sketch of what I think the aggregation would have to look like is at the end of this post.
Here is my code:
public class HbaseWordCountTridentTopolopgyAggedSumManyLong {

    // Read path: turns every cell of an HBase Result row into one
    // (columnName, columnValue) tuple, so INFO:count and INFO:count2
    // come back as two separate tuples.
    public static class WordCountValueMapper implements HBaseValueMapper {

        @Override
        public List<Values> toValues(ITuple tuple, Result result) throws Exception {
            List<Values> values = new ArrayList<Values>();
            Cell[] cells = result.rawCells();
            for (Cell cell : cells) {
                String colName = Bytes.toString(CellUtil.cloneQualifier(cell));
                Values value = new Values(colName, Bytes.toLong(CellUtil.cloneValue(cell)));
                values.add(value);
            }
            return values;
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("columnName", "columnValue"));
        }
    }
    // Appends two constant longs to every word, so that after the groupBy
    // SUM(one) and SUM(one2) are the two columns I want to aggregate together.
    public static class One extends BaseFunction {

        public One() {
        }

        @Override
        public void execute(TridentTuple tuple, TridentCollector collector) {
            String word = tuple.getString(0);
            collector.emit(new Values(1L, 2L));
        }
    }
    public static StormTopology buildTopology() {
        Fields fields = new Fields("word", "count", "count2");

        FixedBatchSpout spout2Split = new FixedBatchSpout(new Fields("sentence"), 3,
                new Values("the cow jumped over the moon"),
                new Values("the man went to the store and bought some candy"),
                new Values("four score and seven years ago"),
                new Values("how many apples can you eat"),
                new Values("to be or not to be the person"));
        spout2Split.setCycle(true);

        // Write path: rowkey = word, counter columns INFO:count and INFO:count2.
        TridentHBaseMapper tridentHBaseMapper = new SimpleTridentHBaseMapper()
                .withColumnFamily("INFO")
                .withColumnFields(new Fields("word"))
                .withCounterFields(new Fields("count", "count2"))
                .withRowKeyField("word");

        // Read path: project the two counter columns and map them back into tuples.
        HBaseValueMapper rowToStormValueMapper = new WordCountValueMapper();
        HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
        projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("INFO", "count"));
        projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("INFO", "count2"));

        HBaseState.Options options = new HBaseState.Options()
                .withDurability(Durability.SYNC_WAL)
                .withMapper(tridentHBaseMapper)
                .withProjectionCriteria(projectionCriteria)
                .withRowToStormValueMapper(rowToStormValueMapper)
                .withTableName("test_HbaseWordCountTridentTopolopgy");
        StateFactory factory = new HBaseStateFactory(options);

        TridentTopology topology = new TridentTopology();
        Stream stream = topology.newStream("spout2Split", spout2Split)
                .each(new Fields("sentence"), new Split(), new Fields("word"))
                .each(new Fields("word"), new One(), new Fields("one", "one2"))
                .groupBy(new Fields("word"))
                // The aggregation I cannot get past: one Sum over two input fields,
                // declaring two output fields.
                .aggregate(new Fields("one", "one2"), new Sum(), new Fields("count", "count2"));

        stream.partitionPersist(factory, fields, new HBaseUpdater(), new Fields());

        TridentState state = topology.newStaticState(factory);
        stream = stream.stateQuery(state, new Fields("word"), new HBaseQuery(), new Fields("columnName", "columnValue"));
        stream.each(new Fields("word", "columnValue"), new PrintFunction(), new Fields());

        return topology.build();
    }
    public static void main(String[] args) throws Exception {
        Map<String, Object> hbConf = new HashMap<String, Object>();
        Config conf = new Config();
        conf.setMaxSpoutPending(5);

        // Run in a LocalCluster when "local" is passed on the command line,
        // otherwise submit to the cluster.
        boolean checkLocal = Arrays.stream(args).anyMatch("local"::equals);
        if (checkLocal) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("HbaseWordCountTridentTopolopgy", conf, buildTopology());
            Thread.sleep(60 * 1000);
        } else {
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology("hbase-word-count-trident", conf, buildTopology());
        }
    }
}
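What I am essentially after is several aggregations under one groupBy. The closest Trident construct I can see for that is the chained-aggregator form, so I assume the failing aggregate(...) call would have to be split into one Sum per column, roughly as sketched below (same Split, One, Sum and field names as above; I have not verified this end to end against the HBase state, so treat it as a guess rather than working code):

Stream summed = topology.newStream("spout2Split", spout2Split)
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .each(new Fields("word"), new One(), new Fields("one", "one2"))
        .groupBy(new Fields("word"))
        .chainedAgg()                                                    // several aggregations under the same groupBy
        .aggregate(new Fields("one"), new Sum(), new Fields("count"))    // SUM(one)  -> count
        .aggregate(new Fields("one2"), new Sum(), new Fields("count2"))  // SUM(one2) -> count2
        .chainEnd();                                                     // resulting stream should carry word, count, count2

My (possibly wrong) understanding is that Sum is a CombinerAggregator that produces exactly one value per group, which would explain why a single aggregate(...) call declaring the two output fields ("count", "count2") is rejected. Is the chained form the intended way to sum several columns under the same group, or is there something simpler I am missing?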