当用户手动清除日期字段框(用户将日期字段更改为空白)时,Extjs绑定值未清除
我无法发布代码,但是找到了类似的fiddle
在这个 fiddle 中,我希望在手动清除 datefield 时清除绑定的值,但实际上显示字段一直显示旧值
如果有人可以为我提供解决方案,那将是很大的帮助
答案 0（得分:1）
您可以对 datefield 使用 specialkey 事件来获得所需的结果。
您可以在这里使用fiddle进行检查。
注意,您可以根据需要输入逻辑。我只是创建了一个简单的示例。
代码段
import numpy as np
import sys
#expPath = sys.argv[1]
#size = sys.argv[2]
#variables = sys.argv[3]
#hiddenLayers = sys.argv[4]
#train = sys.argv[5]
#nodesPerLayer = sys.argv[6]
#predictions_filename = 'predictions_' + size + '_' + variables + '_' + hiddenLayers + '_' + str(nodesPerLayer) + '_' + str(train) + '.txt'
#truth_filename = 'truth_' + size + '_' + variables + '_' + str(train) + '.txt'
#
#predictions = np.loadtxt(thesis_home + '/temporary/' + predictions_filename)
#truth = np.loadtxt(thesis_home + '/temporary/' + truth_filename)
def analysis(truth, predictions, size, variables, hiddenLayers, nodesPerLayer, train):
    """Compute relative RMSE statistics for NN predictions and persist them.

    The stacked arrays ``truth`` and ``predictions`` hold up to three
    physical variables ('u', 'h', 'r'), each occupying a contiguous band of
    250 rows, stacked in the order implied by ``variables`` (one of 'u',
    'h', 'r', 'uh', 'ur', 'hr', 'uhr').  For every variable present, the
    per-timestep RMSE relative to the mean absolute truth is computed; the
    final running mean of that error series is written into the persistent
    results array at ``<thesis_home>/experiments/results.npy``.

    Parameters
    ----------
    truth, predictions : np.ndarray
        Arrays of shape (n_present_vars * 250, timesteps).
    size, nodesPerLayer, hiddenLayers : int-convertible
        Experiment hyper-parameters, used as lookup keys into the axes of
        the results array.
    variables : str
        Variable combination of this experiment; must be a key of
        ``variable_keys`` below.
    train : float-convertible
        Training fraction; lookup key for the results array.

    Raises
    ------
    ValueError
        If ``variables`` is not a recognized combination.

    Side effects
    ------------
    Reads and rewrites ``results.npy`` in place and prints a progress line.
    """
    print('INFO: Analysis part')
    thesis_home = '/home/r/Raphael.Kriegmair/uni/master/thesis'

    # Forward maps: results-array axis position -> hyper-parameter value.
    index_size = {0: 10000}
    index_nodesPerLayer = {0: 50, 1: 100, 2: 250, 3: 500, 4: 750}
    index_train = {0: 0.1, 1: 0.3, 2: 0.5, 3: 0.7, 4: 0.9}
    index_hiddenLayers = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}

    # Inverse maps: hyper-parameter value -> axis position.
    size_index = {v: k for k, v in index_size.items()}
    nodesPerLayer_index = {v: k for k, v in index_nodesPerLayer.items()}
    train_index = {v: k for k, v in index_train.items()}
    hiddenLayers_index = {v: k for k, v in index_hiddenLayers.items()}

    # First-axis indices of `results` for (u, h, r) per variable combination.
    # 12 is the "trash" slot used for variables absent from the experiment.
    # NOTE(review): writing to index 12 requires results.shape[0] >= 13, yet
    # the (commented-out) allocation in this file used 12 rows — verify the
    # shape of results.npy on disk.
    variable_keys = {'u': (0, 12, 12),
                     'h': (12, 1, 12),
                     'r': (12, 12, 2),
                     'uh': (3, 4, 12),
                     'ur': (5, 12, 6),
                     'hr': (12, 7, 8),
                     'uhr': (9, 10, 11)}
    if variables not in variable_keys:
        raise ValueError('unknown variable combination: %r' % (variables,))

    difference = np.square(truth - predictions)
    # From now on only the absolute value of truth is needed.
    truth = np.absolute(truth)
    timesteps = difference.shape[1]

    # Per-variable relative RMSE over time.  Each variable named in
    # `variables` occupies the next 250-row band (fixed order u, h, r);
    # absent variables get an all-zero placeholder series.  This replaces
    # the original seven near-identical if/elif branches.
    rows_per_var = 250
    rmse = {}
    offset = 0
    for var in 'uhr':
        if var in variables:
            band = slice(offset, offset + rows_per_var)
            band_mean = np.mean(truth[band, :])
            rmse[var] = np.sqrt(np.mean(difference[band, :], axis=0)) / band_mean
            offset += rows_per_var
        else:
            rmse[var] = np.zeros(timesteps)
    u_rmse, h_rmse, r_rmse = rmse['u'], rmse['h'], rmse['r']

    # Running mean of each error series (vectorized cumulative average,
    # identical to the original per-step accumulation loop).
    steps = np.arange(1, timesteps + 1)
    u_runningMean = np.cumsum(u_rmse) / steps
    h_runningMean = np.cumsum(h_rmse) / steps
    r_runningMean = np.cumsum(r_rmse) / steps

    # Running standard deviation around the running mean.
    # NOTE(review): computed but never used below — kept for parity with the
    # original code; remove if it is truly dead.
    u_runningStdDev = np.sqrt(np.cumsum(np.square(u_rmse - u_runningMean)) / steps)
    h_runningStdDev = np.sqrt(np.cumsum(np.square(h_rmse - h_runningMean)) / steps)
    r_runningStdDev = np.sqrt(np.cumsum(np.square(r_rmse - r_runningMean)) / steps)

    # Dirty fix for suspiciously large last values (kept from original).
    u_rmse[-1] = u_runningMean[-1]
    h_rmse[-1] = h_runningMean[-1]
    r_rmse[-1] = r_runningMean[-1]

    # Store the final running mean of each variable in the shared results
    # array at this experiment's hyper-parameter coordinates.
    results = np.load(thesis_home + '/experiments/results.npy')
    coords = (size_index[int(size)],
              nodesPerLayer_index[int(nodesPerLayer)],
              train_index[float(train)],
              hiddenLayers_index[int(hiddenLayers)])
    u_index, h_index, r_index = variable_keys[variables]
    results[(u_index,) + coords] = u_runningMean[-1]
    results[(h_index,) + coords] = h_runningMean[-1]
    results[(r_index,) + coords] = r_runningMean[-1]
    np.save(thesis_home + '/experiments/results.npy', results)