调整数据集大小
import cv2
from PIL import Image
import numpy as np
import h5py
#threshold function is disabled when resizing input images
path = "masks_train1/"
for i in range(1,220):
dim = (256, 256) #(w,h)
image = cv2.imread(path + str(i) + ".jpg", 0)
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
(thresh, im_bw) = cv2.threshold(resized, 128, 255, cv2.THRESH_BINARY)
cv2.imwrite('mask_train/' + str(i) + '.png', im_bw)
images = []
masks = []
for i in range(1, 220):
img = Image.open("masks_train1/" + str(i) + ".jpg")
arr = np.array(img)
images.append(arr)
img = Image.open("mask_train/" + str(i) + ".png")
arr = np.array(img)
arr = np.expand_dims(arr, -1)
masks.append(arr)
images = np.array(images)
masks = np.array(masks)
masks.shape
输出:(219, 256, 256, 1)
with h5py.File("Dataset_train1.h5", 'w') as hdf:
hdf.create_dataset('images', data=images, compression='gzip', compression_opts=9)
hdf.create_dataset('masks', data=masks, compression='gzip', compression_opts=9)
Unet 训练和测试文件
import numpy as np
import os
import cv2
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras import backend as keras
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
def unet(input_size = (256, 256, 3)):
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6])
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7])
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'
)(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8])
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9])
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal'
)(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(inputs = inputs, outputs = conv10)
model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])
return model
import h5py
print('*'*30)
print('Loading and preprocessing train data...')
print('*'*30)
file = h5py.File('Dataset_train1.h5', 'r')
imgs_train = file.get('images')
imgs_mask_train = file.get('masks')
imgs_train = np.array(imgs_train)
imgs_mask_train = np.array(imgs_mask_train)
imgs_train = imgs_train.astype('float32')
mean = np.mean(imgs_train) # mean for data centering
std = np.std(imgs_train) # std for data normalization
imgs_train -= mean
imgs_train /= std
imgs_mask_train = imgs_mask_train.astype('float32')
imgs_mask_train /= 255 # scale masks to [0, 1]
print('*'*30)
print('Creating and compiling model...')
print('*'*30)
model = unet()
model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
tensorboard = TensorBoard(log_dir='tensorboard/', write_graph=True, write_images=True)
model.summary()
Model: "functional_7"
总参数:31,032,837 可训练参数:31,032,837 不可训练的参数:0
print('*'*30)
print('Fitting model...')
print('*'*30)
history = model.fit(imgs_train, imgs_mask_train, batch_size=15, epochs=30, verbose=2, shuffle=True,
validation_split=0.2,
callbacks=[model_checkpoint, tensorboard])
**output problem**
******************************
Fitting model...
******************************
Epoch 1/30
WARNING:tensorflow:Model was constructed with shape (None, 256, 256, 3) for input Tensor("input_5:0", shape=(None, 256, 256, 3), dtype=float32), but it was called on an input with incompatible shape (None, 600, 1200, 3).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-69f1645fb376> in <module>
2 print('Fitting model...')
3 print('*'*30)
----> 4 history = model.fit(imgs_train, imgs_mask_train, batch_size=15, epochs=30, verbose=2, shuffle=True,
5 validation_split=0.2,
6 callbacks=[model_checkpoint, tensorboard])
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
812 # In this case we have not created variables on the first call. So we can
813 # run the first trace but we should fail if variables are created.
--> 814 results = self._stateful_fn(*args, **kwds)
815 if self._created_variables:
816 raise ValueError("Creating variables on a non-first call to a function"
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2826 """Calls a graph function specialized to the inputs."""
2827 with self._lock:
-> 2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3208 and self.input_signature is None
3209 and call_context_key in self._function_cache.missed):
-> 3210 return self._define_function_with_shape_relaxation(args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\function.py in _define_function_with_shape_relaxation(self, args, kwargs)
3139 expand_composites=True)
3140
-> 3141 graph_function = self._create_graph_function(
3142 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
3143 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
<块引用>
ValueError:在用户代码中:
C:\Users\peash\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py:806
train_function * 返回 step_function(self, iterator) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function ** 输出 = model.distribute_strategy.run(run_step, args=(data,)) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 跑步 返回 self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica 返回 self._call_for_each_replica(fn, args, kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica 返回 fn(*args, **kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py:789 运行步骤** 输出 = model.train_step(data) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\training.py:747 train_step y_pred = self(x, training=True) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:985 致电 输出 = call_fn(输入,*args,**kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\functional.py:385 称呼 返回 self._run_internal_graph( C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\functional.py:508 _run_internal_graph 输出 = node.layer(*args, **kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:985 致电 输出 = call_fn(输入,*args,**kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\merge.py:183 称呼 返回 self._merge_function(inputs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\merge.py:522 _merge_function 返回 K.concatenate(inputs,axis=self.axis) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\util\dispatch.py:201 包装纸 返回目标(*args,**kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\backend.py:2881 连接 return array_ops.concat([to_dense(x) for x in tensors],axis) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\util\dispatch.py:201 包装纸 返回目标(*args,**kwargs) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\ops\array_ops.py:1654 连接 返回 gen_array_ops.concat_v2(值=值,轴=轴,名称=名称) C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:1220 concat_v2 _, _, _op, _outputs = _op_def_library._apply_op_helper( C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\op_def_library.py:742 _apply_op_helper op = g._create_op_internal(op_type_name,输入,dtypes=None, C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\func_graph.py:591 _create_op_internal return super(FuncGraph, self)._create_op_internal(#pylint: disable=protected-access C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py:3477 _create_op_internal ret = 操作( C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py:1974 初始化 self._c_op = _create_c_op(self._graph,node_def,输入, C:\Users\peash.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py:1815 _create_c_op 引发 ValueError(str(e))
ValueError: Dimension 1 in both shapes must be equal, but are 75 and 74. Shapes are [?,75,150] and [?,74,150]. for '{{node
functional_7/concatenate_13/concat}} = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32](functional_7/dropout_8/dropout/Mul_1, 功能_7/conv2d_93/Relu,功能_7/concatenate_13/concat/axis)' 输入形状: [?,75,150,512], [?,74,150,512], [] 和 计算的输入张量:input[2] = <3>.
尝试重塑训练数据,但仍然遇到此问题。