I am evaluating a Keras model and a TensorFlow Federated model on a regression problem. Both are evaluated with MSE as the performance metric. The only differences are: 1. the way the dataset is split; 2. the loss function:
# keras model loss function
def loss_fn():
    return tf.keras.losses.MeanSquaredError()
# Federated model loss function
def loss_fn_Federated(y_true, y_pred):
    return tf.reduce_mean(tf.keras.losses.MSE(y_true, y_pred))
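For what it's worth, the two definitions should compute the same value on a batch; a quick sanity check (a minimal sketch, assuming eager execution and random dummy data):
import numpy as np
import tensorflow as tf
y_true = np.random.rand(8, 1).astype(np.float32)
y_pred = np.random.rand(8, 1).astype(np.float32)
# Keras loss object vs. the reduce_mean formulation used for the federated model
keras_mse = tf.keras.losses.MeanSquaredError()(y_true, y_pred)
federated_mse = tf.reduce_mean(tf.keras.losses.MSE(y_true, y_pred))
print(float(keras_mse), float(federated_mse))  # should print the same number twice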
Please help me improve the federated model.
# Imports assumed by the script below (not shown in the original post);
# `Import_Pickle` is a project-specific helper module, and `eager_mode` /
# `gradient_descent` follow the TF-internal imports used by the early TFF tutorials.
import collections
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_federated as tff
import xlsxwriter
from sklearn.preprocessing import StandardScaler
from tensorflow.python.eager.context import eager_mode
from tensorflow.python.keras.optimizer_v2 import gradient_descent
import Import_Pickle
tf.compat.v1.enable_v2_behavior()
train_perc = 0.8
Norm_Input = True
Norm_Output = True
Input_str = ['latitude', 'longitude']
if Norm_Output:
    Output_str = ['BeamRSRP_max_Normazlied']
else:
    Output_str = ['BeamRSRP_max']
Final_Tag = 'Tag4' # choose which tagging method to use
Num_Clients = 2 # cannot be less than one
Num_Cons_Sample_Client = 20 # cannot be less than one
max_thr_all = 1000000000
learn_rate = .01
Val_Train_Split = 0.8
SNN_epoch = 50
SNN_batch_size = 1100
shuffle_buffer = 200
SNN_Layers = [10,100,100,100,100,10] # layers Dense
SNN_epsilon =0.1
SNN_decay = 0.01
import datetime
Sim_Feature_Name = "-All-"
add_path_name = "Norm"+str(Norm_Output*1) +Sim_Feature_Name
tosave_Path = add_path_name+str(datetime.datetime.now().hour) + '-'+str(datetime.datetime.now().minute)+'-' + str(datetime.datetime.now().second)+'/'
Data_2018 = False
if Data_2018:
    tmp_removed = ["gpsTime","UEC/CSI/CSI_rs_ssb_idx","UEC/CSI/CSI_rs_ssb_rsrp","TX/CSI_RX/Beam RSRP dBm","TX/CSI_RX/Channel Quality Indicator"]
    TobeRemoved_Array = ["gpsTime_float","core","ssbRSRP_max","RX/PBCH_Rx/Cell id","ssbidx_max","TX/CSI_RX/Precoding Matrix Indicator","CQI_max","UEC/UEC/L1_RX_throughput_mbps","BeamRSRP_max","UEC/PBCH/PBCH_SINR","BeamRSRP_max_Normazlied","log","is_training"]
else:
    tmp_removed = ["gpsTime","TX/CSI_RX/Beam RSRP dBm","nmea_raw","core_y"]
    TobeRemoved_Array = ['TX/CSI_RX/Channel Quality Indicator', 'core', 'epoch', 'TX/CSI_RX/Efficiency', 'TX/CSI_RX/Estimated Freq Error', 'TX/CSI_RX/Estimated Time Error', 'TX/CSI_RX/Precoding Matrix Indicator', 'TX/CSI_RX/Rank Indicator', 'log', 'BeamRSRP_max', 'CQI_max', 'gpsTime_float', 'BeamRSRP_max_Normazlied', 'is_training']
if not os.path.isdir(tosave_Path):
    os.makedirs(tosave_Path)
# Load simulation data.
##############################################
dir_name = 'pickle-data/'
file_name = 'all_logs_april_2019.pickle'
files = os.listdir('pickle-data/')
dataframe = Import_Pickle.Import_v1(dir_name,file_name,Data_2018) # choose False to use 2019 data
# Just to reduce the processing
ave = dataframe.core.min() + max_thr_all
#df2 = dataframe.drop(dataframe[dataframe.core < ave].index)
df2 = dataframe[dataframe.core < ave]
df = Import_Pickle.PreProcessing_v2019(df2,Norm_Input,tmp_removed)
train_df,test_df,X_traindf,X_testdf,Y_traindf,Y_testdf,XY_traindf,XY_testdf = Import_Pickle.Splitting_Train_Test(df,train_perc,Norm_Output,TobeRemoved_Array)
########## splitting for clients ############
def Tag_per_day(train_df_loc, TagNum):
    train_df_loc['log2'] = train_df_loc['log'].apply(lambda x: x.replace("_",""))
    tag_Index = train_df_loc.log2.apply(lambda x: x.index("201"))
    tag_Index2 = tag_Index.values[1]
    tag_date = train_df_loc.log2.apply(lambda x: x[tag_Index2:tag_Index2+8])
    # use the local frame's own index (the original used the global train_df here)
    train_df_loc.loc[:,'Tag'+str(TagNum)] = pd.Series(tag_date.to_list(), index=train_df_loc.index)
    return train_df_loc
# Introduce time as input
X_traindf['gpsTime_float'] = train_df['gpsTime_float']
# introduce first tag per day
TagNum=1
train_df = Tag_per_day(train_df,TagNum)
#examples on groupby
Unq_tag1_grps = list(train_df.groupby(train_df.Tag1).groups.keys())
train_df.groupby(train_df.Tag1).first()
train_df.groupby(train_df.Tag1)['gpsTime_float'].count()
X_traindf['Tag'+str(TagNum)] = train_df['Tag'+str(TagNum)]
#############################
# introduce epoch as tag
#############################
TagNum=2
train_df['Tag'+str(TagNum)] = train_df.epoch
X_traindf['Tag'+str(TagNum)] = train_df['Tag'+str(TagNum)]
#############################
# introduce core as tag
#############################
TagNum=3
train_df['Tag'+str(TagNum)] = train_df.core
X_traindf['Tag'+str(TagNum)] = train_df['Tag'+str(TagNum)]
#############################
# introduce day as tag per client
#############################
TagNum = 4
RepNum = np.ceil(train_df.shape[0]/(Num_Cons_Sample_Client*Num_Clients))
Part_Tag_Array=[]
for i in np.arange(Num_Clients):
    Part_Tag_Tmp = list(map(lambda _: i+1, range(Num_Cons_Sample_Client)))
    Part_Tag_Array.extend(Part_Tag_Tmp)
Full_Tag_Array2 = Part_Tag_Array * int(RepNum)
# truncate the repeated tag array to the number of training rows
# (slicing with [:-extra_tags] breaks when the lengths happen to match exactly)
Full_Tag_Array = Full_Tag_Array2[:train_df.shape[0]]
train_df.loc[:,'Tag'+str(TagNum)] = pd.Series(Full_Tag_Array,index=train_df.index)
X_traindf.loc[:,'Tag'+str(TagNum)] = train_df['Tag'+str(TagNum)]
#############################
# END day as tag per client
#############################
######### Introduce gpsTime and Tag to the input
Input_str.extend(['gpsTime_float',Final_Tag])
#FLObj = FLTest()
#FLObj.test_self_contained_example(X_traindf[Input_str].values, Y_traindf[Output_str].values)
###### Adding standard scaling (StandardScaler):
scaler = StandardScaler()
removed_column = Input_str.pop()
X_train_ScaledTmp = scaler.fit_transform(X_traindf[Input_str],Y_traindf[Output_str])
# Append the integer tag per client without scaling
X_train_Scaled = np.c_[X_train_ScaledTmp, train_df[removed_column].values.reshape(train_df.shape[0],1)]
# X_train_Scaled = scaler.transform(X_traindf[Input_str])
# All In/Out data Numpy
Act_Inputs_Int_Tag = X_train_Scaled
Act_Outputs_Int = Y_traindf[Output_str].values
# Remove Tags
Act_Inputs_Int = np.delete(Act_Inputs_Int_Tag,-1,axis=1)
# prepare In/Out per Client
All_Act_Inputs_Int_Tag = [Act_Inputs_Int_Tag[np.where(Act_Inputs_Int_Tag[:,-1]== x)] for x in np.arange(1,Num_Clients+1)]
All_Act_Outputs_Int = [Act_Outputs_Int[np.where(Act_Inputs_Int_Tag[:,-1]== x)] for x in np.arange(1,Num_Clients+1)]
# Remove Tags
All_Act_Inputs_Int = [np.delete(All_Act_Inputs_Int_Tag[x],-1,axis=1) for x in np.arange(0,Num_Clients) ]
# conversion to float32 is needed
Act_Inputs = np.float32(Act_Inputs_Int)
Act_Outputs = np.float32(Act_Outputs_Int)
# convert dataset to client based dataset
All_Act_Inputs = [np.float32(All_Act_Inputs_Int[x]) for x in np.arange(0,Num_Clients)]
All_Act_Outputs = [np.float32(All_Act_Outputs_Int[x]) for x in np.arange(0,Num_Clients)]
# convert to OrderedDict
new_batch = collections.OrderedDict([('In', Act_Inputs),('Out', Act_Outputs)])
All_new_batch = [collections.OrderedDict([('In', All_Act_Inputs[x]),('Out', All_Act_Outputs[x])]) for x in np.arange(0,Num_Clients)]
# Convert to tensor
dataset_input = tf.data.Dataset.from_tensor_slices(new_batch)
# All_new_batch has different item per In / Out
All_dataset_input = [tf.data.Dataset.from_tensor_slices(All_new_batch[x]) for x in np.arange(0,Num_Clients)]
# Select among the datasets
Used_dataset= dataset_input
All_Used_dataset= All_dataset_input
with eager_mode():
    def preprocess(new_dataset):
        #return Used_dataset.repeat(2).batch(2)
        def map_fn(elem):
            return collections.OrderedDict([('x', tf.reshape(elem['In'], [-1])), ('y', tf.reshape(elem['Out'], [1]))])
        DS2 = new_dataset.map(map_fn)
        #return DS2.repeat(SNN_epoch).map(map_fn).shuffle(shuffle_buffer).batch(SNN_batch_size)
        return DS2.repeat(SNN_epoch).batch(SNN_batch_size)
    train_data = [preprocess(Used_dataset)]
####### changes ###############
def make_federated_data(client_data, client_ids):
    return [preprocess(client_data[x]) for x in client_ids]
#@test {"output": "ignore"}
# sample_clients = [0:Num_Clients]
federated_train_data = make_federated_data(All_Used_dataset, np.arange(0,Num_Clients))
sample_batch = tf.contrib.framework.nest.map_structure(lambda x: x.numpy(), next(iter(train_data[0])))
########## END Changes ############
def create_SK_model():
    modelF = tf.keras.models.Sequential([
        tf.keras.layers.Dense(SNN_Layers[0], activation=tf.nn.relu, input_shape=(Act_Inputs.shape[1],), kernel_initializer='RandomNormal'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(SNN_Layers[1], activation=tf.nn.relu, kernel_initializer='RandomNormal'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1, activation=tf.nn.relu, kernel_initializer='RandomNormal'),
    ])
    return modelF
# keras model loss function
def loss_fn():
    return tf.keras.losses.MeanSquaredError()
# Federated model loss function
def loss_fn_Federated(y_true, y_pred):
    return tf.reduce_mean(tf.keras.losses.MSE(y_true, y_pred))
def model_fn_Federated():
    return tff.learning.from_keras_model(create_SK_model(), sample_batch,
                                         loss=loss_fn_Federated,
                                         optimizer=gradient_descent.SGD(learn_rate))
YTrain = Act_Outputs #np.random.rand(50,1)
XTrain = Act_Inputs #np.random.rand(50,100)
# locally compile the model
Local_model = create_SK_model()
Local_model.compile(loss=tf.keras.losses.MeanSquaredError(),optimizer=tf.keras.optimizers.SGD(lr=learn_rate,decay=1e-6,momentum=0.9,nesterov=True))
# fitting without federated learning
trained_local_Model = Local_model.fit(XTrain,YTrain, validation_split=Val_Train_Split, epochs=SNN_epoch, batch_size=SNN_batch_size) #tbuc
# Loss of local model
Local_Loss = trained_local_Model.history['loss'] # tbuc
# Reuse the local model object to host the federated weights (note: this is a reference, not a deep copy)
Local_model_Fed = Local_model
# training/fitting with TF federated learning
trainer_Itr_Process = tff.learning.build_federated_averaging_process(model_fn_Federated,server_optimizer_fn=(lambda : gradient_descent.SGD(learning_rate=learn_rate)),client_weight_fn=None)
FLstate = trainer_Itr_Process.initialize()
FL_Loss_arr = []
Fed_eval_arr = []
# Track the loss across the federated training rounds
for round_num in range(2, 10):
    """
    The second of the pair of federated computations, next, represents a single round of Federated Averaging,
    which consists of pushing the server state (including the model parameters) to the clients, on-device
    training on their local data, collecting and averaging model updates, and producing a new updated model
    at the server.
    """
    FLstate, FLoutputs = trainer_Itr_Process.next(FLstate, federated_train_data)
    # Track the loss.
    FL_Loss_arr.append(FLoutputs.loss)
    # Set the federated weights on the copied local model object
    tff.learning.assign_weights_to_keras_model(Local_model_Fed, FLstate.model)
    #Local_model_Fed.set_weights(tff_weights)
    print(tff.__name__)
    # Evaluate the federated weights on the local training data
    Fed_predicted = Local_model_Fed.predict(XTrain)
    Fed_eval = Local_model_Fed.evaluate(XTrain, YTrain)
    Fed_eval_arr.append(Fed_eval)
if True:
    FieldnamesSNN = ['Local_Loss', 'FL_Loss_arr', 'Fed_eval_arr']
    Valuesall2 = [Local_Loss, FL_Loss_arr, Fed_eval_arr]
    # ValuesallSNN = Valuesall2.transpose()
    ValuesallSNN = Valuesall2
    workbook = xlsxwriter.Workbook(tosave_Path + Sim_Feature_Name + 'SNN_loss.xlsx')
    worksheetSNN = workbook.add_worksheet(Sim_Feature_Name + 'SNN_loss')
    row = 0
    col = 0
    # Write validation results
    prev_col_len = 0
    for names in FieldnamesSNN:
        row = 0
        worksheetSNN.write(row, col, names)
        # values = ValuesallSNN[:,col]
        values = np.array(ValuesallSNN)[col]
        row = row + 1
        for val in values:
            print(val)
            worksheetSNN.write(row, col, val)
            row = row + 1
        col = col + 1
    workbook.close()
The current results are below (Local_Loss is the Keras model's loss, FL_Loss_arr is the per-round loss across the clients, and Fed_eval_arr is the loss of the aggregated model):
Local_Loss FL_Loss_arr Fed_eval_arr
0.361531615257263 0.027410915121436 0.386061603840212
0.354410231113434 0.026805186644197 0.378279162582626
0.32423609495163 0.026369236409664 0.370627223614037
0.287901371717453 0.02615818567574 0.363125243503663
0.244472771883011 0.025971807539463 0.355770364471598
0.203615099191666 0.025779465213418 0.348538321804381
0.165129363536835 0.025623736903071 0.341443773817459
0.130221307277679 0.025475736707449 0.334481204779932
0.103743642568588
0.084212586283684
0.065002344548702
0.057881370186806
0.054710797965527
0.050441317260265
0.050083305686712
0.049112796783447
0.050076562911272
0.051196228712797
0.05450239777565
0.053276151418686
Answer (score: 1)
Two things jump out at me here.
First, the double use of the learning rate: it is applied once in the client update and again in the server optimizer you pass to build_federated_averaging_process. Federated Averaging, as originally introduced, first computes client updates, where the gradients are scaled by the learning rate, and then aggregates them as a weighted average on the server; in particular, the server does not scale by the learning rate a second time. Scaling the updates by the learning rate twice effectively squares the learning rate, which could easily explain the sharp drop.
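With learn_rate = 0.01 applied on both the clients and the server, the effective step is roughly 0.01 * 0.01 = 0.0001. A minimal sketch of the fix, mirroring the build_federated_averaging_process call in the question (only the server learning rate changes; the client-side SGD inside model_fn_Federated keeps learn_rate):
# Let the server apply the averaged client update as-is (learning rate 1.0)
trainer_Itr_Process = tff.learning.build_federated_averaging_process(
    model_fn_Federated,
    server_optimizer_fn=lambda: gradient_descent.SGD(learning_rate=1.0),
    client_weight_fn=None)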
Second, the use of batch normalization. Batch normalization in federated optimization is largely an open research area, and it is not clear that it brings the same optimization benefits in the federated setting as it does in the datacenter. Your belief about the answer to that question should depend on the mechanism by which you think BatchNorm works its magic, and there has been some debate on this recently.
That said, I would try setting the server learning rate to 1 (as in the sketch above), removing the BatchNorm layers (a sketch follows below), and making sure the centralized and federated models process equal amounts of data so the comparison is direct. In the extreme case, Federated Averaging simply reduces to gradient descent, so if you are seeing a stark contrast, the optimization problem may be misspecified.
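For the BatchNorm point, a minimal sketch of the model without the BatchNormalization layer (named create_SK_model_no_bn here for illustration; everything else is kept as in the question's create_SK_model, including SNN_Layers and Act_Inputs):
def create_SK_model_no_bn():
    # Same architecture as create_SK_model, minus BatchNormalization
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(SNN_Layers[0], activation=tf.nn.relu,
                              input_shape=(Act_Inputs.shape[1],),
                              kernel_initializer='RandomNormal'),
        tf.keras.layers.Dense(SNN_Layers[1], activation=tf.nn.relu,
                              kernel_initializer='RandomNormal'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1, activation=tf.nn.relu,
                              kernel_initializer='RandomNormal'),
    ])
On the equal-data point: as written, preprocess calls repeat(SNN_epoch) on each client dataset, so every federated round runs roughly SNN_epoch local epochs per client, while the centralized fit runs SNN_epoch epochs once (and with validation_split=0.8 Keras holds out 80% of that data for validation), so the two runs do not see comparable amounts of data.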
Hope this helps!