I'm new to Keras (and to ML in general), and I'm trying to train a binary classifier. I'm using weighted binary cross-entropy as the loss function, but I'm not sure how to test whether my implementation is correct.
Is this an accurate implementation of weighted binary cross-entropy? How can I test it?
Answer (score: 0)
On top of the true-vs-pred loss, Keras's train and eval losses include regularization losses. Below is a simple testing scheme, along with a working implementation of binary_crossentropy and l2 weight (not 'activity') losses. Note that the overview snippet directly below references the model, helpers, and data defined further down.
import numpy as np
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from keras.regularizers import l2

X = np.random.rand(10, 1200)  # (batch_size, num_features)
Y = np.random.randint(0, 2, (10, 1))
class_weights  = {'0': 1, '1': 6}  # positive class weighted 6x
sample_weights = np.array([class_weights[str(label[0])] for label in Y])

keras_loss  = model.evaluate(X, Y, sample_weight=sample_weights)
custom_loss = binary_crossentropy(Y, model.predict(X), sample_weights)  # + other losses
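To make the weighting concrete: with the class_weights above, each label is mapped to its class's weight. A tiny hand-written label vector (values are arbitrary, for illustration only) shows the mapping:

Y_demo = np.array([[1], [0], [1]])
print(np.array([class_weights[str(label[0])] for label in Y_demo]))  # [6 1 6]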
def binary_crossentropy(y_true, y_pred, sample_weight=1):
    # clip probabilities away from 0 and 1 so the logs below stay finite
    y_pred = [max(min(pred[0], 1 - K.epsilon()), K.epsilon()) for pred in y_pred]
    y_true, y_pred, sample_weight = force_2d_shape([y_true, y_pred, sample_weight])

    logits = np.log(y_pred) - np.log(1 - y_pred)  # sigmoid inverse
    # numerically stable form of sigmoid cross-entropy:
    # max(x, 0) - x*z + log(1 + exp(-|x|))
    neg_abs_logits = -np.abs(logits)
    relu_logits    = (logits > 0) * logits
    loss_vec = relu_logits - logits * y_true + np.log(1 + np.exp(neg_abs_logits))
    return np.mean(sample_weight * loss_vec)
def force_2d_shape(arr_list):
    # coerce 1-D arrays and scalars to column vectors so broadcasting lines up,
    # e.g. [array([1, 0, 1]), 0.5] -> [array([[1], [0], [1]]), array([[0.5]])]
    for arr_idx, arr in enumerate(arr_list):
        if len(np.array(arr).shape) != 2:
            arr_list[arr_idx] = np.atleast_2d(arr).T
    return arr_list
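As a quick sanity check of the stable-logits formulation, it can be compared against the textbook form -[y*log(p) + (1-y)*log(1-p)] on a few hand-picked probabilities (values are arbitrary; this assumes the two functions above are already defined):

y_true_chk = np.array([[1], [0], [1]])
y_pred_chk = np.array([[0.9], [0.2], [0.6]])
naive  = -np.mean(y_true_chk * np.log(y_pred_chk)
                  + (1 - y_true_chk) * np.log(1 - y_pred_chk))
stable = binary_crossentropy(y_true_chk, y_pred_chk)
print(np.isclose(naive, stable))  # True, up to floating-point error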
def l2_weight_loss(model):
    # recompute Keras's l2 regularization terms by hand:
    # lambda * sum(w**2) for each regularized kernel/bias
    l2_loss = 0
    for layer in model.layers:
        for attribute in layer.__dict__:
            if layer.__dict__[attribute] is not None:
                if 'kernel_regularizer' in attribute:
                    l2_lambda = layer.kernel_regularizer.l2
                    l2_loss  += l2_lambda * np.sum(layer.get_weights()[0]**2)
                if 'bias_regularizer' in attribute:
                    l2_lambda = layer.bias_regularizer.l2
                    l2_loss  += l2_lambda * np.sum(layer.get_weights()[1]**2)
    return l2_loss
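To check that l2_weight_loss recovers what Keras adds, here is a minimal sketch on a throwaway one-layer model (the layer sizes and lambda are arbitrary); per regularized tensor, the l2 term is just lambda * sum(w**2):

inp = Input(shape=(3,))
dns = Dense(4, kernel_regularizer=l2(0.01))(inp)
m   = Model(inp, dns)
W = m.layers[1].get_weights()[0]  # the Dense kernel
print(np.isclose(l2_weight_loss(m), 0.01 * np.sum(W**2)))  # True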
ipt = Input(shape=(1200,))
x   = Dense(60, activation='relu', kernel_regularizer=l2(1e-3))(ipt)
x   = Dense(12, activation='relu', bias_regularizer=l2(1e-4))(x)
out = Dense(1, activation='sigmoid')(x)
model = Model(ipt, out)
model.compile(loss='binary_crossentropy', optimizer='adam')

keras_loss   = model.evaluate(X, Y, sample_weight=sample_weights)
custom_loss  = binary_crossentropy(Y, model.predict(X), sample_weights)
custom_loss += l2_weight_loss(model)

print('%.6f' % keras_loss  + ' -- keras_loss')
print('%.6f' % custom_loss + ' -- custom_loss')
3.263891 -- keras_loss
3.263891 -- custom_loss
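Rather than eyeballing the printed values, the agreement can also be asserted programmatically (the tolerance here is a judgment call, chosen to match the 6-decimal print precision above):

assert np.isclose(keras_loss, custom_loss, atol=1e-6)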