In Keras I have looked into the callback mechanism, but it provides no information before training starts; output only ever appears after epoch 1. I want to check the value of the loss function at the first feed-forward pass. How can I do that? Thanks.
The answer "set model.trainable = False, then train the model" (from "How to perform feed forward propagation in CNN using Keras?") does not work for me.
I set model.trainable = False before compiling the model, but the model still reports a different loss each time, which is strange: it should report a constant loss, namely the loss of the feed-forward pass.
The code is as follows:
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
def gen_x(n, p, rho):
    # n x p design matrix whose columns share pairwise correlation rho.
    # z is drawn before the branches so it is also defined when abs(rho) == 1.
    z = np.random.normal(size=(n, 1))
    if abs(rho) < 1:
        beta = np.sqrt(rho / (1 - rho))
        x0 = np.random.normal(size=(n, p))
        x = beta * np.repeat(z, repeats=p, axis=1) + x0
    if abs(rho) == 1:
        x = np.repeat(z, repeats=p, axis=1)
    return x
def genecoef(p):
    # Random true coefficient vector of length p.
    #return list(map(lambda x: np.power(-1, x) * np.exp(-0.1 * (x - 1)), np.arange(1, p + 1, 1)))
    return list(np.random.rand(p))
## This function creates true survival times as described in section 3 of the paper.
## In all simulations we set snr (signal-to-noise ratio) to 3.
def gen_times(x, snr):
    n, p = x.shape
    coef = genecoef(p)
    f = np.matmul(np.matrix(x), np.matrix(coef).T)
    e = np.random.normal(size=(n, 1))
    # Scale the noise so that var(f) / var(k*e) equals snr.
    k = np.sqrt(np.var(f) / (snr * np.var(e)))
    y = np.exp(f + k * e)
    return y
## This function creates censoring times, using the same signal-to-noise construction.
def gen_times_censor(x, snr):
    n, p = x.shape
    coef = genecoef(p)
    f = np.matmul(np.matrix(x), np.matrix(coef).T)
    e = np.random.normal(size=(n, 1))
    k = np.sqrt(np.var(f) / (snr * np.var(e)))
    y = np.exp(k * e)
    return y
def nltr(x):
    # Nonlinear feature transform: interactions, squares, and an exponential term.
    y1 = x[:, 0] * x[:, 1]
    y2 = x[:, 2] * x[:, 3]
    y3 = x[:, 4] ** 2
    y4 = x[:, 5] * (x[:, 6] ** 2)
    y5 = x[:, 7] * x[:, 8] * x[:, 9]
    y6 = 0.5 * np.exp(x[:, 8] * x[:, 9])
    newx = np.column_stack((y1, y2, y3, y4, y5, y6))
    return newx
def survdata(n, p, snr, rho):
    x = gen_x(n, p, rho)
    time = gen_times(x, snr)
    censortime = gen_times_censor(x, snr)
    # Observed time is the minimum of event time and censoring time.
    y = np.apply_along_axis(np.min, 1, np.column_stack((time, censortime)))
    y = np.array(y)
    # b == 0 censored, b == 1 uncensored
    b = np.apply_along_axis(np.argmax, 1, np.column_stack((time, censortime)))
    b = np.array(b)
    a = x
    # Sort everything by observed time, ascending.
    ordery = np.argsort(y)
    a = a[ordery]
    y = y[ordery]
    b = b[ordery]
    event_index = np.argwhere(b == 1).ravel().astype(np.int32)
    nsample = len(b)
    nevent = sum(b)
    # Rlist[j] is the risk set of the j-th event: all samples still under
    # observation when that event occurs.
    Rlist = []
    for j in range(nevent):
        Rlist += [list(range(event_index[j], nsample))]
    bmask = b.astype(bool)
    cumlist = list(reversed(np.append(event_index, n)))
    slarr = np.vectorize(lambda x: (len(x) - 1))
    # nctrue: number of comparable pairs, used to normalise the c-index.
    nctrue = np.sum(slarr(Rlist))
    # a: n (#samples) x p (#features) matrix, rows sorted by survival time
    # y: survival time
    # b: censored (0) or not (1)
    # bmask: bool(b)
    # nevent: #uncensored
    return a, y, b, bmask, nsample, nevent, event_index, Rlist, cumlist, nctrue
n = 50
p = 10
snr = 1
rho = 0.1
a, y, b, bmask, nsample, nevent, event_index, Rlist, cumlist, nctrue = survdata(n, p, snr, rho)
sc = StandardScaler()
a = nltr(a)
a = sc.fit_transform(a)
def ploss(y_true, y_pred):
    # y_pred for sample x_i is the value of np.dot(x_i, beta) in the linear Cox case.
    # Sum of risk scores over the uncensored samples.
    z = K.sum(K.gather(y_pred, event_index))
    currentsum = 0
    for j in range(nevent):
        # cumlist is reversed, so currentsum accumulates the risk-set sum of
        # exp(y_pred) for each event, working from the last event backwards.
        currentsum = currentsum + K.sum(K.exp(K.gather(y_pred,
            np.array(range(cumlist[j + 1], cumlist[j])))))
        z = z - K.log(currentsum)
    z = -z
    return z
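(For reference, this loop appears to compute the negative log Cox partial likelihood: writing \(\eta_i\) for y_pred[i] and \(R_i = \{j : t_j \ge t_i\}\) for the risk set of event \(i\),

\[ \text{ploss} = -\sum_{i:\, b_i = 1}\Big(\eta_i - \log\sum_{j \in R_i} e^{\eta_j}\Big), \]

with the reversed cumlist letting the nested risk-set sums be built incrementally.)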
def c_index_func(y_true, y_pred):
    # Concordance index: fraction of comparable pairs in which the sample
    # with the earlier event time has the higher predicted score.
    c_hat = 0
    for i in range(nevent - 1):
        c_hat = c_hat + K.sum(K.cast(
            y_pred[event_index[i] + 1:, 0] < y_pred[event_index[i], 0], tf.float32))
    return c_hat / nctrue
model = Sequential()
model.add(Dense(1, activation='linear', kernel_initializer='one',
                batch_input_shape=(a.shape[0], a.shape[1])))
#model.add(Dropout(0.2))
#model.compile(loss=ploss, optimizer=keras.optimizers.Adam(lr=0, beta_1=0.9, beta_2=0.999,
#              epsilon=None, decay=0.0, amsgrad=False), metrics=[c_index_func])
# Freeze the weights before compiling, so training should leave the loss unchanged.
model.trainable = False
model.compile(loss=ploss, optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.0,
              decay=0.0, nesterov=False), metrics=[c_index_func])
model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)
Answer 0 (score: 1)
For this you can simply use model.evaluate(x, y), which returns an array containing the loss followed by the metrics; the first element of that array is the loss on the given data. Just call it before training and it will give you the initial loss.
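A minimal sketch of that approach against the model and data defined in the question (a, y, and model as above):

# Evaluate once before fitting: returns [loss, c_index_func] for the given data.
initial = model.evaluate(x=a, y=y, batch_size=len(a), verbose=0)
print('initial loss:', initial[0])
model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)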
Answer 1 (score: 0)
It is simple: just set the learning rate to 0 and train the DNN; then every reported loss is the initial loss.
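A minimal sketch of that workaround, reusing the question's model and loss; with lr=0 SGD never moves the weights, so each epoch prints the loss of the initial feed-forward pass:

# Zero learning rate: fit() still runs the forward pass and loss computation,
# but every weight update is zero, so the loss stays at its initial value.
model.compile(loss=ploss, optimizer=keras.optimizers.SGD(lr=0.0, momentum=0.0,
              decay=0.0, nesterov=False), metrics=[c_index_func])
model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)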