I am trying to implement author tagging: we wrote a rule that every newly generated article should automatically be tagged with a date. This is what we have so far:
import MessagesProto  # your generated proto .py file
from datetime import datetime as dt
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark.sql.functions import udf
def message_proto(value):
    # value is the raw Kafka message payload (bytes)
    m = MessagesProto.message_x()
    m.ParseFromString(value)
    # the keys must match the fields of schema_impressions below; m.x / m.z
    # assume the proto fields carry the same names as the output columns,
    # and for TimestampType Spark expects a Python datetime value
    return {'x': m.x,
            'z': m.z}
schema_impressions = StructType() \
    .add("x", StringType()) \
    .add("z", TimestampType())
proto_udf = udf(message_proto, schema_impressions)
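As a quick sanity check, the parser can be exercised locally before wiring it into the stream. A minimal sketch, assuming message_x really defines a string field x (the round trip below only illustrates the call pattern, not our real payload):

# Hypothetical local test of message_proto, independent of Spark.
# Assumes message_x has a string field `x`; `raw_bytes` stands in for one
# Kafka record value pulled off the topic.
sample = MessagesProto.message_x(x="some-author")
raw_bytes = sample.SerializeToString()
parsed = message_proto(raw_bytes)
print(parsed)  # expected: a dict with the keys 'x' and 'z'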
class StructuredStreaming():
    def structured_streaming(self):
        stream = self.spark.readStream \
            .format("kafka") \
            .option("kafka.bootstrap.servers", self.kafka_bootstrap_servers) \
            .option("subscribe", self.topic) \
            .option("startingOffsets", self.startingOffsets) \
            .option("max.poll.records", self.max_poll_records) \
            .option("auto.commit.interval.ms", self.auto_commit_interval_ms) \
            .option("session.timeout.ms", self.session_timeout_ms) \
            .option("key.deserializer", self.key_deserializer) \
            .option("value.deserializer", self.value_deserializer) \
            .load()

        self.query = stream \
            .select(col("value")) \
            .select(proto_udf("value").alias("value_udf")) \
            .select("value_udf.x", "value_udf.z")
...and so on.
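For completeness, a query like this would normally be finished with a writeStream sink. A minimal sketch of how that continuation might look, assuming a console sink is used only for debugging (the sink and output mode here are assumptions, not what our actual job uses):

# hypothetical continuation inside structured_streaming(): write the parsed
# columns to the console so the deserialized values can be inspected
self.query.writeStream \
    .outputMode("append") \
    .format("console") \
    .option("truncate", "false") \
    .start() \
    .awaitTermination()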
But for some reason, it does not fully validate them:

Here is an example of how we write an article.