How can I apply different hatches to my data, the same way I can define different colors as a tuple?
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import kafka.serializer.StringDecoder;
import properties.PropertyCache;
import scala.Tuple2;
public class CustomerKafkaConsumerThread implements Serializable {

    String broker;
    String[] jars = {"C:\\iot-kafka-producer-1.0.0.jar"};
    private static final Pattern SPACE = Pattern.compile(" ");

    public void sparkKafkaConsumer(String topics, String broker) throws InterruptedException {
        System.out.println("INSIDE SPARK KAFKACONSUMER METHOD..........");
        this.broker = broker;

        // Local Spark configuration; the producer jar is shipped via setJars.
        SparkConf conf = new SparkConf().setAppName("CustomerKafkaConsumerThread")
                .set("spark.local.ip", "10.41.81.17")
                .setMaster("local[*]")
                .setJars(jars);
        /* .setJars(new String[]{
               "C:/Users/pusarla/workspace/spark/iot-kafka-producer/target/iot-kafka-producer-1.0.0.jar"
           }); */

        // Streaming context with a 2-second micro-batch interval.
        JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(2000));

        Map<String, String> kafkaParams = new HashMap<String, String>();
        kafkaParams.put("metadata.broker.list", broker);
        Set<String> topicSet = Collections.singleton(topics);

        System.out.println("Creating direct kafka stream with brokers and topics..........");
        // Create direct kafka stream with brokers and topics
        JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc,
                String.class, String.class, StringDecoder.class, StringDecoder.class,
                kafkaParams, topicSet);

        // Keep only the value of each (key, value) Kafka record.
        JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
            public String call(Tuple2<String, String> tuple2) {
                return tuple2._2();
            }
        });

        // Collect each non-empty batch to the driver and print its records.
        lines.foreachRDD(rdd -> {
            if (rdd.count() > 0) {
                List<String> strArray = rdd.collect();
                Iterator<String> topicData = strArray.iterator();
                while (topicData.hasNext()) {
                    System.out.println("PRINTING >>>>>>>>>>>>" + topicData.next());
                }
            }
        });

        jssc.start();
        jssc.awaitTermination();
    }
}
With the tuple syntax in the example below, the hatch is applied to all bars together. Instead, I would rather have each dataset use its own hatch and color combination. I know I can manually modify each patch of the figure by hand (a minimal sketch of that workaround is shown after the example):
#!/usr/bin/env python3
import pandas
from matplotlib import pyplot as plt
data = {"Label 1": [2,3,5,10], "Label 2": [1,2,4,8]}
pandas.DataFrame(data).plot.bar(color=("grey", "white"), hatch=("/", "*"))
plt.show()
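A minimal sketch of that per-patch workaround, assuming pandas appends all bars of one column to ax.patches before the bars of the next column:
#!/usr/bin/env python3
import pandas
from matplotlib import pyplot as plt

data = {"Label 1": [2, 3, 5, 10], "Label 2": [1, 2, 4, 8]}
ax = pandas.DataFrame(data).plot.bar(color=("grey", "white"))

# pandas draws all bars of the first column before those of the second,
# so the flat list ax.patches can be split into one chunk per dataset.
hatches = ("/", "*")
bars_per_dataset = len(data["Label 1"])
for i, patch in enumerate(ax.patches):
    patch.set_hatch(hatches[i // bars_per_dataset])

plt.show()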
This is a bit hacky, but it is the best solution I found through this discussion.
How can hatches be applied to the different datasets of a combined plot?