I am using a custom Keras data generator, shown below:
import os
import numpy as np
from tensorflow import keras

# whitening, crop_image and crop_image_3d are my own preprocessing helpers, defined elsewhere.

class DataGen(keras.utils.Sequence):
    def __init__(self, combined_ids, mask_ids, combined_path, mask_path,
                 batch_size=1, image_size=256, PET_only=False, CT_only=False):
        self.combined_ids = combined_ids
        self.mask_ids = mask_ids
        self.combined_path = combined_path
        self.mask_path = mask_path
        self.image_size = image_size
        self.batch_size = batch_size
        self.PET_only = PET_only
        self.CT_only = CT_only
        self.on_epoch_end()

    def __loadcombined__(self, id_name_combined):
        # Path of the image
        combined_path = os.path.join(self.combined_path, id_name_combined)
        # Reading the combined image
        combined_image = np.load(combined_path)
        if self.PET_only:
            # Only PET images
            combined_image = combined_image[:, :, 0]
            combined_image = whitening(combined_image)
            combined_image = crop_image(combined_image, 256, 256)
            #combined_image = cv2.resize(combined_image, dsize=(self.image_size, self.image_size), interpolation=cv2.INTER_CUBIC)
        elif self.CT_only:
            # Only CT images
            combined_image = combined_image[:, :, 1]
            combined_image = whitening(combined_image)
            combined_image = crop_image(combined_image, 256, 256)
            #combined_image = cv2.resize(combined_image, dsize=(self.image_size, self.image_size), interpolation=cv2.INTER_CUBIC)
        else:
            # Full combined (PET + CT) images
            combined_image = combined_image.astype("float32")
            combined_image[:, :, 0] = whitening(combined_image[:, :, 0])
            combined_image[:, :, 1] = whitening(combined_image[:, :, 1])
            combined_image = crop_image_3d(combined_image, 256, 256)
            #combined_image = cv2.resize(combined_image, dsize=(self.image_size, self.image_size, 2), interpolation=cv2.INTER_CUBIC)
        return combined_image

    def __loadmask__(self, id_name_mask):
        # Path of the mask
        mask_path = os.path.join(self.mask_path, id_name_mask)
        # Reading the mask image (no normalisation needed)
        mask_image = np.load(mask_path)
        # Cropping the mask image to remove some useless information
        mask_image = crop_image(mask_image, 256, 256)
        #mask_image = cv2.resize(mask_image, dsize=(self.image_size, self.image_size), interpolation=cv2.INTER_NEAREST)
        # Converting the masks to boolean data type
        mask_image = mask_image.astype(bool)
        return mask_image

    def __getitem__(self, index):
        # Generate one batch of data
        if (index + 1) * self.batch_size > len(self.combined_ids):
            self.batch_size = len(self.combined_ids) - index * self.batch_size
        files_batch_combined = self.combined_ids[index * self.batch_size : (index + 1) * self.batch_size]
        files_batch_mask = self.mask_ids[index * self.batch_size : (index + 1) * self.batch_size]

        combined = []
        mask = []
        for id_name_mask in files_batch_mask:
            _mask = self.__loadmask__(id_name_mask)
            mask.append(_mask)
        for id_name_combined in files_batch_combined:
            _combined = self.__loadcombined__(id_name_combined)
            combined.append(_combined)

        combined = np.array(combined)
        if self.PET_only:
            combined = np.expand_dims(combined, -1)
        if self.CT_only:
            combined = np.expand_dims(combined, -1)
        mask = np.array(mask)
        mask = np.expand_dims(mask, -1)
        return combined, mask

    def on_epoch_end(self):
        pass

    def __len__(self):
        # Denotes the number of batches per epoch
        return int(np.ceil(len(self.combined_ids) / float(self.batch_size)))
This produces a 4-D tensor with 2-channel images (PET/CT) as the training data and binary masks as the ground-truth segmentation.
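For context, this is roughly how I check that claim on a single batch (a quick sketch only; it reuses the ID lists and paths from the training code further down, and the batch size of 4 is arbitrary):

# Sanity-check sketch: pull one batch and inspect shapes/dtypes.
# Assumes train_ids_combined, train_ids_mask, train_path_combined and
# train_path_mask are already defined as in the training code below.
gen = DataGen(train_ids_combined, train_ids_mask,
              train_path_combined, train_path_mask,
              batch_size=4, image_size=256)

images, masks = gen[0]
print(images.shape, images.dtype)   # expected: (4, 256, 256, 2) float32
print(masks.shape, masks.dtype)     # expected: (4, 256, 256, 1) bool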
However, when I try to train a standard U-Net with this generator, it gets through 10 batches of training data and then gives me this error:
InvalidArgumentError: TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was float32, but the yielded element was [[array([[-0.480255, -0.48197123, -0.48379683], [-0.4811588, -0.48264366, -0.48423332]
This completely confuses me, because I already cast the multi-channel PET/CT training data to float32 inside the generator (supposedly the type it expects but is not getting).
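To try to narrow it down, I can loop over the generator directly, outside of model.fit, and report any batch whose images are not a clean float32 array. This is just a debugging sketch using the train_gen instance from the training code below:

# Debugging sketch: index the Sequence the same way Keras would and flag
# any batch that is not a 4-D float32 array, to find the batch that
# triggers the TypeError around index 10.
for i in range(len(train_gen)):
    images, masks = train_gen[i]
    if images.dtype != np.float32 or images.ndim != 4:
        print("batch", i, "-> dtype:", images.dtype,
              "shape:", images.shape, "ndim:", images.ndim)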
I have tried this with both model.fit and model.fit_generator. Code below:
train_gen = DataGen(train_ids_combined, train_ids_mask, train_path_combined, train_path_mask, batch_size = batch_size, image_size = image_size, PET_only=True)
valid_gen = DataGen(valid_ids_combined, valid_ids_mask, train_path_combined, train_path_mask, batch_size = batch_size, image_size = image_size, PET_only=True)
test_gen = DataGen(test_ids_combined, test_ids_mask, train_path_combined, train_path_mask, batch_size = 1, image_size = image_size, PET_only=True)
train_steps = len(train_ids_combined)//batch_size
valid_steps = len(valid_ids_combined)//batch_size
test_steps = len(test_ids_combined)
history = model.fit(train_gen, validation_data=valid_gen, steps_per_epoch=train_steps, validation_steps=valid_steps,
epochs=epochs, callbacks=[model_checkpoint, early_stopping])
If anyone has any suggestions and/or has run into this problem before, I would really appreciate the help. I am running this on Google Colab (standard edition).