ValueError: Tensor is not an element of this graph

Date: 2018-01-07 00:07:42

Tags: python tensorflow keras

I am having a rough time of it. Although I managed to solve my earlier problem with the dimensions... a new error has appeared.

The task is still binary image segmentation. I have images of size WxH along with labels of the same size (0, 255). In the ImageDataGenerator I rescale both the images and the labels to values between (0, 1).

I followed the advice and reshaped my labels to (None, W*H, 2), using this nice blog post as a reference.

For upsampling I used BilinearUpSampling2D.

So: my labels and the network output have the same shape.
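For reference, here is a minimal NumPy-only sketch of that label conversion (the array names are hypothetical; the real conversion happens with Keras backend ops inside the generator below):

import numpy as np

# Hypothetical stand-in for one batch of binary masks, already rescaled to [0, 1]:
# shape (batch, H, W, 1), here (5, 352, 128, 1).
masks = np.random.randint(0, 2, size=(5, 352, 128, 1)).astype(np.float32)

foreground = (masks == 1.0).astype(np.float32).reshape(masks.shape[0], -1)
background = (masks != 1.0).astype(np.float32).reshape(masks.shape[0], -1)

labels = np.stack((foreground, background), axis=2)
print(labels.shape)  # (5, 45056, 2), i.e. (batch, W*H, 2)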

The error I am getting now is:

ValueError: Tensor Tensor("conv2d_1_input:0", shape=(?, 352, 128, 1), dtype=float32) is not an element of this graph.

I am posting the whole code again and asking whether you notice anything silly I am doing; I cannot seem to figure it out. I am using Python 3.5, Keras (2.1.2) and the TensorFlow (1.4.0) backend.

import numpy
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Dropout, Reshape
from keras.preprocessing.image import ImageDataGenerator
# BilinearUpSampling2D is the custom bilinear upsampling layer from the blog post linked above


def superGenerator(image_gen, label_gen, batch):
    while True:
        x = image_gen.next()[0]
        y = label_gen.next()[0]
        #process label
        class_labels_tensor = K.equal(y, 1.0)
        background_labels_tensor = K.not_equal(y, 1.0)
        bit_mask_class = K.eval(tf.to_float(class_labels_tensor))
        bit_mask_bckg = K.eval(tf.to_float(background_labels_tensor))

        forg = numpy.reshape(bit_mask_class, (-1, bit_mask_class.shape[1]*bit_mask_class.shape[2]))
        bckg = numpy.reshape(bit_mask_bckg, (-1, bit_mask_bckg.shape[1]*bit_mask_bckg.shape[2]))

        label = numpy.stack((forg,bckg),axis=2)
        #label shape = (-1, 45056, 2), network output as well. 
        yield x, label


img_height = 352
img_width = 128

train_data_dir = 'dataset/Train/Images'
train_label_dir = 'dataset/Train/Labels'
validation_data_dir = 'dataset/Validation/Images'
validation_label_dir = 'dataset/Validation/Labels'
n_train_samples = 1000
n_validation_samples = 400
epochs = 25
batch_size = 5

input_shape = (img_height, img_width, 1)
target_shape = (img_height, img_width)
model = Sequential()


#Layer 1

model.add(Conv2D(80, (11,11), input_shape=input_shape, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))

#Layer 2
model.add(Conv2D(96,(7,7), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))

#Layer 3
model.add(Conv2D(128, (5, 5), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))

#Layer 4
model.add(Conv2D(160,(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2,2)))

#FC Layer 1
model.add(Conv2D(1024, (1,1), activation='relu'))

#FC Layer 2
model.add(Conv2D(512, (1,1), activation='relu'))

#Classification layer
model.add(Conv2D(2, (1,1)))

#Bilinear upsampling
model.add(BilinearUpSampling2D(target_size=(img_height, img_width)))

#Softmax
model.add(Conv2D(2, (1,1), activation='softmax'))

#Reshape, to match labels (batch, w*h, 2)
model.add(Reshape((img_height*img_width,2)))

model.summary()



model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

data_gen_args = dict(
    rescale= 1./ 255,
    rotation_range=10,
    height_shift_range=0.2,
    fill_mode='reflect',
    horizontal_flip=True,
    vertical_flip=True
    )

train_datagen = ImageDataGenerator(**data_gen_args)
train_label_datagen = ImageDataGenerator(**data_gen_args)
test_datagen = ImageDataGenerator(**data_gen_args)
test_label_datagen = ImageDataGenerator(**data_gen_args)

seed = 5

train_image_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    seed=seed)
train_label_generator = train_label_datagen.flow_from_directory(
    train_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    seed=seed)

validation_image_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    seed=seed)

validation_label_generator = test_label_datagen.flow_from_directory(
    validation_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    seed=seed)

train_generator = superGenerator(train_image_generator, train_label_generator, batch_size)
test_generator = superGenerator(validation_image_generator, validation_label_generator, batch_size)

model.fit_generator(
    train_generator,
    steps_per_epoch= n_train_samples // batch_size,
    epochs=25,
    verbose=1,
    validation_data=test_generator,
    validation_steps=n_validation_samples // batch_size)

model.save_weights('first_try.h5')
model_yaml = model.to_yaml()
with open("model.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)

Adding the full traceback:

Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1070, in _run
    allow_operation=False)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 3348, in as_graph_element
    return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 3427, in _as_graph_element_locked
    raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("conv2d_1_input:0", shape=(?, 352, 128, 1), dtype=float32) is not an element of this graph.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "ObjectNet.py", line 169, in <module>
    validation_steps=n_validation_samples // batch_size)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 87, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 1227, in fit_generator
    initial_epoch=initial_epoch)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 87, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 2147, in fit_generator
    class_weight=class_weight)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1839, in train_on_batch
    outputs = self.train_function(ins)
  File "/usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py", line 2357, in __call__
    **self.session_kwargs)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 895, in run
    run_metadata_ptr)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1073, in _run
    + e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("conv2d_1_input:0", shape=(?, 352, 128, 1), dtype=float32) is not an element of this graph.
Exception ignored in: <function WeakValueDictionary.__init__.<locals>.remove at 0x7fb886217620>
Traceback (most recent call last):
  File "/usr/lib/python3.5/weakref.py", line 117, in remove
TypeError: 'NoneType' object is not callable

1 Answer:

Answer 0 (score: 0):

For anyone with a similar problem: I find Keras's errors very unintuitive. In my case there was nothing wrong with the layers themselves.

My supervisor pointed out that it might be a problem of feeding the network data of the wrong shape. That was not the case, since both the input and the output were fine.

The problem was the choice of loss function and the shape it expects. I am still trying to work out which loss function to use, but I found that both the dice coefficient loss and mean_pairwise_squared_error work.
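For reference, here is a minimal sketch of a soft Dice loss for Keras, assuming the (batch, W*H, 2) softmax output described in the question; the smoothing constant and exact formulation are one common variant, not necessarily the code used here:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient computed over all pixels and both channels of the batch.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return 1.0 - dice_coef(y_true, y_pred)

# Used in place of binary_crossentropy when compiling the model:
# model.compile(loss=dice_coef_loss, optimizer='adam', metrics=[dice_coef])
#
# tf.losses.mean_pairwise_squared_error can be wrapped the same way, e.g.
# lambda y_true, y_pred: tf.losses.mean_pairwise_squared_error(y_true, y_pred)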