我部署了一个具有6个数据节点的DolphinDB集群。我想模拟一个场景,其中600个设备每100毫秒收集一次数据,然后将其写入流表。在每个数据节点中,订阅者订阅流表并将数据写入数据库。我将SQL代码写为波纹管,
//create stream table, then subscribe it
def PrepareStreamEnv(){
clearAllCache()
n = 1000000;
tableSchema = streamTable(n:0,`hardwareId`ts`temp1,[INT,TIMESTAMP,DOUBLE])
share(tableSchema,"sensorInfoTable")
enableTablePersistence(objByName("sensorInfoTable"), false, false, n)
//dfs subscribe
dfsTable = loadTable("dfs://iotDemoDB0","sensorInfoTable")
subscribeTable(, "sensorInfoTable", "save_to_db", -1, append!{dfsTable}, true,50000,1)
}
// create database
def prepareDatabase(){
tableSchema = table(100:0,`hardwareId`ts`temp1,[INT,TIMESTAMP,DOUBLE])
if(exists("dfs://iotDemoDB0"))
dropDatabase("dfs://iotDemoDB0")
db1 = database("",VALUE,(today()-1)..(today()+1))
db2 = database("",RANGE,0..60*1000)
db = database("dfs://iotDemoDB0",COMPO,[db1,db2])
dfsTable = db.createPartitionedTable(tableSchema,"sensorInfoTable",`ts`hardwareId)
}
//clear stream environment
def clearStreamEnv(){
clearTablePersistence(objByName("sensorInfoTable"))
unsubscribeTable(,"sensorInfoTable","save_to_db")
}
//write a record for every devices
def writeData(hardwareVector){
hardwareNumber = size(hardwareVector)
return table(take(hardwareVector,hardwareNumber) as hardwareId ,take(now(),hardwareNumber) as ts,rand(20..41,hardwareNumber) as temp1)
}
def simulate(hardwareIds,interval,freq){
hardwareNum=size(hardwareIds)
PrepareStreamEnv()
t=objByName("sensorInfoTable")
for(i in 0:freq){
t.append!(writeData(hardwareIds))
sleep(interval)
}
clearStreamEnv()
}
def mainJob(interval,freq,devices){
nodes = getDataNodes()
nodeCount = nodes.size()
hardwareVectors = cut(0..(devices*nodeCount - 1), devices)
jobids=array(string, nodeCount, 10);
prepareDatabase()
for( index in 0:nodeCount){
rpc(nodes[index], submitJob, "submit_" + nodes[index], "submit_" + nodes[index], simulate{hardwareVectors[index], interval, freq})
}
}
login("admin","123456")
interval = 100
freq =50
devices = 1000
mainJob(interval,freq,devices)
我在GUI中执行代码,但是发现没有记录插入数据库的表中。然后,我执行以下代码以获取作业信息,
pnodeRun(getRecentJobs)