Custom loss function for a Keras model

Posted: 2020-05-28 12:32:06

Tags: numpy tensorflow keras deep-learning pytorch

I have a custom loss function that I want to use in a Keras model, but it gives me the error below. Could you please help me resolve it? The custom loss itself is simple, and I have included the data and model so it is easy to check.

The custom loss function is Maximum Mean Discrepancy (MMD).
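
For equal-sized source and target batches of n samples, the quantity estimated below is the biased empirical squared MMD under a kernel k (here a sum of Gaussian kernels at several bandwidths):

\mathrm{MMD}^2(X, Y) = \frac{1}{n^2}\sum_{i,j} k(x_i, x_j) + \frac{1}{n^2}\sum_{i,j} k(y_i, y_j) - \frac{2}{n^2}\sum_{i,j} k(x_i, y_j)

which is what K.mean(XX + YY - XY - YX) computes in MMD_loss below.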

import tensorflow as tf
import keras.backend as K

def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):

    total = K.concatenate([source, target], axis=0)
    n_samples = K.int_shape(source)[0] + K.int_shape(target)[0]

    total0= tf.broadcast_to(K.expand_dims(total, 0),shape=(K.int_shape(total)[0],K.int_shape(total)[0],K.int_shape(total)[1]))
    total1= tf.broadcast_to(K.expand_dims(total, 1),shape=(K.int_shape(total)[0],K.int_shape(total)[0],K.int_shape(total)[1]))


    print(K.int_shape(total0))

    print(K.int_shape(total1))
    print(total0)
    print(total1)

    # L2_distance = ((total0-total1)**2).sum(2) 
    L2_distance = K.sum(((total0-total1)**2),axis=2) 

    if fix_sigma:
      bandwidth = fix_sigma
    else:
      bandwidth = K.sum(L2_distance) / (n_samples**2 - n_samples)  # mean pairwise squared distance (diagonal terms are zero)
    bandwidth /= kernel_mul ** (kernel_num // 2)


    bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]

    kernel_val = [K.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
    # print(K.sum((kernel_val),axis=0))
    # return K.sum((kernel_val),axis=0)
    return sum(kernel_val)
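
A quick shape check on small, made-up constant inputs (purely illustrative) shows that the returned kernel matrix covers the source and target samples jointly:

src = K.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
tgt = K.constant([[0.0, 0.9, 0.1], [0.1, 0.0, 0.9]])
kernels = guassian_kernel(src, tgt)
print(K.int_shape(kernels))  # (4, 4): one row/column per sample in source + target combined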

def MMD_loss( source, target):
  kernel_mul = 2.0
  kernel_num = 5
  # sample_weight=torch.FloatTensor(sample_weight)
  print(K.int_shape(source))
  print(source)

  print(target)

  print(K.int_shape(target)[1])
  fix_sigma  = K.constant([1e-6])
  # source=torch.FloatTensor(source)
  # target=torch.FloatTensor(target)
  # print(K.shape(source))
  batch_size = K.int_shape(source)[0]
  kernels = guassian_kernel(source, target, kernel_mul,  kernel_num,  fix_sigma)
  # split the joint kernel matrix into its four blocks
  XX = kernels[:batch_size, :batch_size]  # source vs. source
  YY = kernels[batch_size:, batch_size:]  # target vs. target
  XY = kernels[:batch_size, batch_size:]  # source vs. target
  YX = kernels[batch_size:, :batch_size]  # target vs. source
  loss = K.mean(XX + YY - XY - YX)
  print("weighted loss........")
  # return K.sum(np.dot(loss.numpy(), sample_weight),axis=1)
  return loss #.numpy()

y_true = [[0, 1, 0]]  # assumed example ground-truth row for this quick check
y_pred = [[0, 0.95, 0]]
weight = [[1, 1, 1]]
MMD_loss(K.constant(y_true), K.constant(y_pred))

Data

from sklearn.datasets import make_blobs
from matplotlib import pyplot
from pandas import DataFrame
import numpy as np
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=3, n_features=2)
# scatter plot, dots colored by class value
df = DataFrame(dict(x=X[:,0], y=X[:,1], label=y))
colors = {0:'red', 1:'blue', 2:'green'}
fig, ax = pyplot.subplots()
grouped = df.groupby('label')
for key, group in grouped:
    group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
pyplot.show()

Encoding

def one_hot(labels, n_class = 3):
    """ One-hot encoding """
    expansion = np.eye(n_class)
    y = expansion[:, labels-1].T
    assert y.shape[1] == n_class, "Wrong number of labels!"

    return y
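
A quick check on the labels produced by make_blobs (0, 1, 2); note that labels - 1 relies on NumPy's negative indexing, so label 0 is mapped to the last class column:

print(one_hot(np.array([0, 1, 2])))
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]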

Model

import keras
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
from sklearn.model_selection import train_test_split

dfclass = df['label'].values
df1 = df.drop(["label"], axis=1).values.astype("float32")

X_train, X_test, y_train, y_test = train_test_split(df1, dfclass, train_size=0.8)
y_train = one_hot(y_train,3)
model = Sequential()
model.add(Dense(6, input_dim=2, activation='relu'))
# model.add(Dense(32, activation='relu'))
model.add(Dense(3, activation='softmax'))
adam=optimizers.Adam(lr=0.001)

# model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.compile(loss = MMD_loss, optimizer=adam, metrics=['accuracy'])
sam_weight = np.array( [1] * 80)
# sam_weight = np.ones(shape=(len(y_train),))

history = model.fit(X_train, y_train, epochs=10, batch_size=10)#,sample_weight=sam_weight)
loss, acc = model.evaluate(X_test, one_hot(y_test,3), batch_size=80,verbose=1)    
print("accuracy ",acc) 

Error

(None, None)
Tensor("dense_48_target:0", shape=(None, None), dtype=float32)
Tensor("dense_48/Softmax:0", shape=(None, 3), dtype=float32)
3
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-206-3fc90c13c85e> in <module>()
     17 
     18 # model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
---> 19 model.compile(loss = MMD_loss, optimizer=adam, metrics=['accuracy'])
     20 sam_weight = np.array( [1] * 80)
     21 # sam_weight = np.ones(shape=(len(y_train),))

6 frames
<ipython-input-205-859bcd423c45> in guassian_kernel(source, target, kernel_mul, kernel_num, fix_sigma)
      6 
      7     total = K.concatenate([source, target], axis=0)
----> 8     n_samples = K.int_shape(source)[0] + K.int_shape(target)[0]
      9 
     10     # total0 = K.expand_dims(total, 0)

TypeError: unsupported operand type(s) for +: 'NoneType' and 'NoneType'
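
The two None values come from the fact that, at compile time, Keras hands the loss symbolic placeholders whose batch dimension is not yet fixed, so K.int_shape(source)[0] and K.int_shape(target)[0] are both None. A minimal sketch (assuming the same graph-mode Keras backend) of the difference between the static shape and the dynamic K.shape tensor:

import keras.backend as K

y = K.placeholder(shape=(None, 3))  # stand-in for the y_true/y_pred tensors passed to the loss at compile time
print(K.int_shape(y))       # (None, 3) -> static shape; the batch size is unknown
print(K.int_shape(y)[0])    # None      -> None + None is exactly what raises the TypeError above
print(K.shape(y))           # symbolic tensor holding the runtime shape
print(K.shape(y)[0])        # symbolic batch size, only resolved when real data is fed in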
