Error while training a deep network

Date: 2019-06-13 09:03:48

Tags: python autoencoder

    import numpy as np
    import tensorflow as tf
    import keras
    from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, subtract, add
    from keras.models import Model
    from keras import backend as K
    from keras.callbacks import ModelCheckpoint
    from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda






    MAX = 1
    GAUS_KERNEL = 3
    GAUS_SIGMA = 1.2

    def Gaussian_Filter(kernel_size=GAUS_KERNEL, sigma=GAUS_SIGMA):
        k = (kernel_size - 1) // 2
        filter = []
        sigma_2 = sigma ** 2
        for i in range(kernel_size):
            filter_row = []
            for j in range(kernel_size):
                Hij = np.exp(-((i + 1 - (k + 1)) ** 2 + (j + 1 - (k + 1)) ** 2) / (2 * sigma_2)) / (2 * np.pi * sigma_2)
                filter_row.append(Hij)
            filter.append(filter_row)
        return np.asarray(filter).reshape(kernel_size, kernel_size, 1, 1)
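    # (Comment added for clarity; not in the original post.) The constants below build
    # the fixed filter bank used by TF_Canny: a 3x3 Gaussian smoothing kernel,
    # Sobel-style horizontal/vertical gradient kernels, and pairs of one-hot kernels
    # that select the two neighbours along each quantised gradient direction
    # (0, 45, 90, 135 degrees). All of them have a single input channel.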

    gaussian_filter = tf.constant(Gaussian_Filter(), tf.float32)
    h_filter = tf.reshape(tf.constant([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], tf.float32), [3, 3, 1, 1])
    v_filter = tf.reshape(tf.constant([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], tf.float32), [3, 3, 1, 1])

    np_filter_0 = np.zeros((3, 3, 1, 2))
    np_filter_0[1, 0, 0, 0], np_filter_0[1, 2, 0, 1] = 1, 1
    filter_0 = tf.constant(np_filter_0, tf.float32)

    np_filter_90 = np.zeros((3, 3, 1, 2))
    np_filter_90[0, 1, 0, 0], np_filter_90[2, 1, 0, 1] = 1, 1
    filter_90 = tf.constant(np_filter_90, tf.float32)

    np_filter_45 = np.zeros((3, 3, 1, 2))
    np_filter_45[0, 2, 0, 0], np_filter_45[2, 0, 0, 1] = 1, 1
    filter_45 = tf.constant(np_filter_45, tf.float32)

    np_filter_135 = np.zeros((3, 3, 1, 2))
    np_filter_135[0, 0, 0, 0], np_filter_135[2, 2, 0, 1] = 1, 1
    filter_135 = tf.constant(np_filter_135, tf.float32)

    np_filter_sure = np.ones([3, 3, 1, 1]); np_filter_sure[1, 1, 0, 0] = 0
    filter_sure = tf.constant(np_filter_sure, tf.float32)
    border_paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])

    def Border_Padding(x, pad_width):
        for _ in range(pad_width):
            x = tf.pad(x, border_paddings, 'SYMMETRIC')
        return x

    def FourAngles(d):
        d0   = tf.to_float(tf.greater_equal(d, 157.5)) + tf.to_float(tf.less(d, 22.5))
        d45  = tf.to_float(tf.greater_equal(d, 22.5)) * tf.to_float(tf.less(d, 67.5))
        d90  = tf.to_float(tf.greater_equal(d, 67.5)) * tf.to_float(tf.less(d, 112.5))
        d135 = tf.to_float(tf.greater_equal(d, 112.5)) * tf.to_float(tf.less(d, 157.5))
        return (d0, d45, d90, d135)


    def TF_Canny(img_tensor, minRate=0.10, maxRate=0.40, preserve_size=True, remove_high_val=False, return_raw_edges=False):
        img_tensor = img_tensor * MAX
        if preserve_size: img_tensor = Border_Padding(img_tensor, (GAUS_KERNEL - 1) // 2)

        x_gaussian = tf.nn.convolution(img_tensor, gaussian_filter, padding='VALID')
        if remove_high_val: x_gaussian = tf.clip_by_value(x_gaussian, 0, MAX / 2)

        if preserve_size: x_gaussian = Border_Padding(x_gaussian, 1)
        Gx = tf.nn.convolution(x_gaussian, h_filter, padding='VALID')
        Gy = tf.nn.convolution(x_gaussian, v_filter, padding='VALID')
        G = tf.sqrt(tf.square(Gx) + tf.square(Gy))
        BIG_PHI = tf.atan2(Gy, Gx)
        BIG_PHI = (BIG_PHI * 180 / np.pi) % 180
        D_0, D_45, D_90, D_135 = FourAngles(BIG_PHI)

        targetPixels_0 = tf.nn.convolution(G, filter_0, padding='SAME')
        isGreater_0 = tf.to_float(tf.greater(G * D_0, targetPixels_0))
        isMax_0 = isGreater_0[:, :, :, 0:1] * isGreater_0[:, :, :, 1:2]

        targetPixels_90 = tf.nn.convolution(G, filter_90, padding='SAME')
        isGreater_90 = tf.to_float(tf.greater(G * D_90, targetPixels_90))
        isMax_90 = isGreater_90[:, :, :, 0:1] * isGreater_90[:, :, :, 1:2]

        targetPixels_45 = tf.nn.convolution(G, filter_45, padding='SAME')
        isGreater_45 = tf.to_float(tf.greater(G * D_45, targetPixels_45))
        isMax_45 = isGreater_45[:, :, :, 0:1] * isGreater_45[:, :, :, 1:2]

        targetPixels_135 = tf.nn.convolution(G, filter_135, padding='SAME')
        isGreater_135 = tf.to_float(tf.greater(G * D_135, targetPixels_135))
        isMax_135 = isGreater_135[:, :, :, 0:1] * isGreater_135[:, :, :, 1:2]

        edges_raw = G * (isMax_0 + isMax_90 + isMax_45 + isMax_135)
        edges_raw = tf.clip_by_value(edges_raw, 0, MAX)

        if return_raw_edges: return tf.squeeze(edges_raw)

        edges_sure = tf.to_float(tf.greater_equal(edges_raw, maxRate))
        edges_weak = tf.to_float(tf.less(edges_raw, maxRate)) * tf.to_float(tf.greater_equal(edges_raw, minRate))

        edges_connected = tf.nn.convolution(edges_sure, filter_sure, padding='SAME') * edges_weak
        for _ in range(10): edges_connected = tf.nn.convolution(edges_connected, filter_sure, padding='SAME') * edges_weak

        edges_final = edges_sure + tf.clip_by_value(edges_connected, 0, MAX)
        return tf.squeeze(edges_final)
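    # (Comment added for clarity; not in the original post.) Because gaussian_filter,
    # h_filter, v_filter and the directional filters all have in_channels = 1, the
    # convolutions inside TF_Canny only accept an NHWC tensor with a single channel,
    # e.g. shape (batch, H, W, 1).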

    def edge_loss(y_true, y_pred):
        y_true = tf.cast(y_true, dtype=tf.float32)
        y_pred = tf.cast(y_pred, dtype=tf.float32)
        E = TF_Canny(y_true, return_raw_edges=False)
        E = 1 - E
        E = tf.cast(E, dtype=tf.float32)
        A = y_true - y_pred
        A = tf.cast(A, dtype=tf.float32)
        E = tf.reshape(E, (8, 128, 128, 3))
        LP = K.abs(A)
        LP = tf.cast(LP, dtype=tf.float32)
        SQ = K.mean(K.square(y_pred - y_true), axis=-1)

        LE = tf.multiply(E, LP)
        LE = tf.cast(LE, dtype=tf.float32)

        SQ1 = K.mean(K.square(LE), axis=-1)

        los = 0.7 * SQ + 0.3 * SQ1
        return los
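    # (Comment added for clarity; not in the original post.) y_true here is the
    # 3-channel (batch, 128, 128, 3) target image and is passed straight into
    # TF_Canny, whose kernels have a single input channel; the reshape to
    # (8, 128, 128, 3) also hard-codes the batch size of 8.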









    input_img = Input(shape=(128, 128, 3))

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
    x = Conv2D(64, (3, 3), strides=(2, 2), activation='relu', padding='same')(x)
    # x = Conv2D(64, (3, 3), strides=(2, 2), activation='relu', padding='same')(x)
    encoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

    inp = Lambda(bicub)(encoded)   # bicub (presumably a bicubic upsampling helper) is defined elsewhere in the poster's code and is not shown in this snippet
    dup = inp

    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(inp)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)

    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)

    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)

    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
    model = Activation('relu')(model)
    res_img = Conv2D(3, (3, 3), padding='same', kernel_initializer='he_normal')(model)

    output_img = add([dup, res_img])

    output_img = Activation('sigmoid')(output_img)

    model = Model(input_img, res_img)

    # b = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

    # Left over from the other (working) run; b, residualmse and the SSIM metrics are defined elsewhere and not shown here
    # model.compile(optimizer=b, loss=residualmse, metrics=[SSIM, M_SSIM, negSSIM, "accuracy"])

    model.summary()


    model.compile(optimizer='adam', loss=edge_loss)
    mc = keras.callbacks.ModelCheckpoint('drive/My Drive/altweights2/weights{epoch}.hdf5', save_best_only=False, period=1, verbose=1)
    model.fit(x_train2, x_train2,
              validation_data=(x_train2, x_train2),
              epochs=100,
              batch_size=8,
              shuffle=True,
              callbacks=[mc])

I train this network on the same dataset twice, with everything identical except the loss function: once with the custom loss (`edge_loss` above) and once with the built-in mean squared error. With mean squared error it trains fine, but with the custom loss it fails with the error below. I have searched a lot online but cannot find what is wrong. Any help is appreciated.

    NotFoundError                             Traceback (most recent call last)

    <ipython-input-31-1b5a177291a2> in <module>()
          6                 batch_size=8,
          7                 shuffle=True,
    ----> 8                 callbacks=[mc,TensorBoardColabCallback(tbc)])

    5 frames

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
        526             None, None,
        527             compat.as_text(c_api.TF_Message(self.status.status)),
    --> 528             c_api.TF_GetCode(self.status.status))
        529     # Delete the underlying status object from memory otherwise it stays alive
        530     # as there is a reference to status from this from the traceback due to

    NotFoundError: No algorithm worked!
         [[{{node loss_6/conv2d_92_loss/convolution}}]]
         [[{{node training_5/Adam/gradients/lambda_4/truediv_grad/Reshape}}]]

When I run it in a Jupyter notebook instead, it shows this error:

    InvalidArgumentError                      Traceback (most recent call last)
    <ipython-input-20-292832ca5d2b> in <module>()
          6                 batch_size=8,
          7                 shuffle=True,
    ----> 8                 callbacks=[mc])

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/keras/engine/training.pyc in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
       1037                               initial_epoch=initial_epoch,
       1038                               steps_per_epoch=steps_per_epoch,
    -> 1039                               validation_steps=validation_steps)
       1040 
       1041     def evaluate(self, x=None, y=None,

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/keras/engine/training_arrays.pyc in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
        197                     ins_batch[i] = ins_batch[i].toarray()
        198 
    --> 199                 outs = f(ins_batch)
        200                 outs = to_list(outs)
        201                 for l, o in zip(out_labels, outs):

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in __call__(self, inputs)
       2713                 return self._legacy_call(inputs)
       2714 
    -> 2715             return self._call(inputs)
       2716         else:
       2717             if py_any(is_tensor(x) for x in inputs):

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in _call(self, inputs)
       2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
       2674         else:
    -> 2675             fetched = self._callable_fn(*array_vals)
       2676         return fetched[:len(self.outputs)]
       2677 

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in __call__(self, *args)
       1452         else:
       1453           return tf_session.TF_DeprecatedSessionRunCallable(
    -> 1454               self._session._session, self._handle, args, status, None)
       1455 
       1456     def __del__(self):

    /home/titanxpascal/Documents/myenv/local/lib/python2.7/site-packages/tensorflow/python/framework/errors_impl.pyc in __exit__(self, type_arg, value_arg, traceback_arg)
        517             None, None,
        518             compat.as_text(c_api.TF_Message(self.status.status)),
    --> 519             c_api.TF_GetCode(self.status.status))
        520     # Delete the underlying status object from memory otherwise it stays alive
        521     # as there is a reference to status from this from the traceback due to

    InvalidArgumentError: input and filter must have the same depth: 3 vs 1
         [[Node: loss_7/conv2d_23_loss/convolution = Conv2D[T=DT_FLOAT, data_format="NCHW", dilations=[1, 1, 1, 1], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:GPU:0"](loss_7/conv2d_23_loss/convolution-0-TransposeNHWCToNCHW-LayoutOptimizer, Const_9)]]
         [[Node: training_7/Adam/gradients/loss_7/conv2d_23_loss/sub_2_grad/Shape/_3001 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_507_training_7/Adam/gradients/loss_7/conv2d_23_loss/sub_2_grad/Shape", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
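For context on the second error: the Canny kernels are built with a single input channel (shapes `(3, 3, 1, ...)`), while `edge_loss` feeds the 3-channel `y_true` into `TF_Canny`, which is what the message "input and filter must have the same depth: 3 vs 1" points at. Below is a minimal sketch of a loss variant that reconciles the depths by collapsing the target to grayscale first; this is only an illustration of the shape issue under that assumption, not the original poster's code (`tf.image.rgb_to_grayscale` is a standard TF 1.x op, `edge_loss_gray` is a hypothetical name):

    def edge_loss_gray(y_true, y_pred):
        # Illustrative variant only: make the TF_Canny input single-channel so it
        # matches the (3, 3, 1, ...) kernels defined above.
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        gray = tf.image.rgb_to_grayscale(y_true)            # (batch, 128, 128, 1)
        E = 1.0 - TF_Canny(gray, return_raw_edges=False)    # low weight on edges, high elsewhere
        E = tf.reshape(E, (-1, 128, 128, 1))                 # keep the batch size dynamic instead of hard-coding 8
        LP = K.abs(y_true - y_pred)                          # per-pixel absolute error
        SQ = K.mean(K.square(y_pred - y_true), axis=-1)
        SQ1 = K.mean(K.square(E * LP), axis=-1)              # E broadcasts over the 3 colour channels
        return 0.7 * SQ + 0.3 * SQ1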

0 Answers:

No answers yet