Keras TensorFlow: TypeError: can't pickle _thread.lock objects

Asked: 2019-11-26 16:34:51

Tags: python, tensorflow, keras

I am learning to use deep learning (specifically a U-Net model) for image segmentation.

The error is probably something small, but I can't figure out what is causing it.

I have tried running this code for prostate image segmentation several times.

I recently read posts from other people about the same error, but they did not match my case.

I don't know how to fix it.

Epoch 1/1
Exception in thread Thread-6:
Traceback (most recent call last):
  File "C:\Users\exaud\Anaconda3\lib\threading.py", line 917, in _bootstrap_inner
    self.run()
  File "C:\Users\exaud\Anaconda3\lib\threading.py", line 865, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\exaud\Anaconda3\lib\site-packages\keras\utils\data_utils.py", line 666, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "C:\Users\exaud\Anaconda3\lib\site-packages\keras\utils\data_utils.py", line 661, in <lambda>
    initargs=(seqs, self.random_seed))
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\context.py", line 119, in Pool
    context=self.get_context())
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\pool.py", line 177, in __init__
    self._repopulate_pool()
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\pool.py", line 238, in _repopulate_pool
    self._wrap_exception)
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\pool.py", line 257, in _repopulate_pool_static
    w.start()
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\exaud\Anaconda3\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
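
From what I can tell, the traceback shows Keras starting multiprocessing workers; on Windows the spawn start method has to pickle whatever is handed to those workers, and (as far as I understand) the iterators returned by ImageDataGenerator.flow hold a threading lock internally, which cannot be pickled. Here is a minimal, Keras-free sketch that reproduces the same TypeError (the HoldsLock class is only an illustration, not part of my code):

import pickle
import threading

# Illustration only: any object carrying a threading.Lock fails to
# pickle with the same TypeError as in the traceback above.
class HoldsLock:
    def __init__(self):
        self.lock = threading.Lock()

try:
    pickle.dumps(HoldsLock())
except TypeError as exc:
    print(exc)  # e.g. "can't pickle _thread.lock objects"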

The model is based on U-Net and is built with Keras and TensorFlow.

Here is the code used to train the model:

from __future__ import division, print_function
import numpy as np
print()

import pydicom
from collections import defaultdict
import os, sys, pickle 
import shutil
import matplotlib.pyplot as plt
import nrrd
from skimage.transform import resize
from skimage.exposure import equalize_adapthist, equalize_hist

from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint

from keras.callbacks import LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator

from models import actual_unet, simple_unet
from metrics import dice_coef, dice_coef_loss, numpy_dice

from augmenters import elastic_transform

def dicom_to_array(img_rows, img_cols):

    for direc in ['train', 'test', 'validate']:

        fname = 'E:/Research/Codes/prostate_segmentation_u-net/data/' + direc + '_dict.pkl'
        PathDicom = 'E:/Research/Codes/prostate_segmentation_u-net/data/' + direc + '/'
        dcm_dict = dict()  # create an empty dict (one image stack per patient)

        for dirName, subdirList, fileList in os.walk(PathDicom):

            if any(".dcm" in s for s in fileList):
                ptn_name = dirName.split('/')[3]
                fileList = filter(lambda x: '.dcm' in x, fileList)
                indice = [ int( fname[:-4] ) for fname in fileList]
                imgs = np.zeros( [indice[-1]+1, img_rows, img_cols])

                for filename in np.sort(fileList):

                    img = pydicom.read_file(os.path.join(dirName,filename)).pixel_array.T
                    img = equalize_hist( img.astype(int) )
                    img = resize( img, (img_rows, img_cols), preserve_range=True)
                    imgs[int(filename[:-4])] = img

                dcm_dict[ptn_name] = imgs

        imgs = []
        img_masks = []

        for patient in dcm_dict.keys():
            for fnrrd in os.listdir(PathDicom):

                if fnrrd.startswith(patient) and fnrrd.endswith('nrrd'):
                    masks = np.rollaxis(nrrd.read(PathDicom + fnrrd)[0], 2)
                    rescaled = np.zeros( [ len(masks), img_rows, img_cols])
                    for mm in range(len(rescaled)):
                        rescaled[mm] = resize( masks[mm], (img_rows, img_cols), preserve_range=True)/2.0

                    masks = rescaled.copy()

                    #Check if the dimension of the masks and the images match
                    if len(dcm_dict[patient]) != len(masks) :
                        print('Dimension mismatch for', patient, 'in folder', direc)
                    else:
                        img_masks.append(masks)
                        imgs.append( dcm_dict[patient] )

                    break

        imgs = np.concatenate((imgs,imgs),axis=0, out=None).reshape(-1, img_rows, img_cols, 1)
        img_masks = np.concatenate((img_masks,img_masks),axis=0, out=None).reshape(-1, img_rows, img_cols, 1)

        #I will do only binary classification for now
        img_masks = np.array(img_masks>0.45, dtype=int)
        np.save('E:/Research/Codes/prostate_segmentation_u-net/data/' + direc + '.npy', imgs)
        np.save('E:/Research/Codes/prostate_segmentation_u-net/data/' + direc + '_masks.npy', img_masks)


def load_data():

    X_train = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/train.npy')
    y_train = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/train_masks.npy')
    X_test = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/test.npy')
    y_test = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/test_masks.npy')
    X_val = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/validate.npy')
    y_val = np.load('E:/Research/Codes/prostate_segmentation_u-net/data/validate_masks.npy')

    return X_train, y_train, X_test, y_test, X_val, y_val

# learning rate schedule
def step_decay(epoch):
    initial_lrate = 0.001
    drop = 0.5
    epochs_drop = 5
    lrate = initial_lrate * drop**int((1 + epoch) / epochs_drop)
    return lrate
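# For example, with initial_lrate=0.001, drop=0.5 and epochs_drop=5 as above,
# step_decay returns 0.001 for epochs 0-3, 0.0005 for epochs 4-8,
# 0.00025 for epochs 9-13, and so on: the learning rate is halved
# roughly every 5 epochs.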

def keras_fit_generator(img_rows=96, img_cols=96, n_imgs=10**4, batch_size=32, regenerate=True):

    if regenerate:
        dicom_to_array(img_rows, img_cols)
        #preprocess_data()

    X_train, y_train, X_test, y_test, X_val, y_val = load_data()

    img_rows = X_train.shape[1]
    img_cols = img_rows

    # we create two instances with the same arguments
    data_gen_args = dict(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=90.,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.2)#,
        #preprocessing_function=elastic_transform)

    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    # Provide the same seed and keyword arguments to the fit and flow methods
    seed = 1
    image_datagen.fit(X_train,seed=seed)
    mask_datagen.fit(y_train, seed=seed)

    image_generator = image_datagen.flow(X_train, batch_size=batch_size, seed=seed)

    mask_generator = mask_datagen.flow(y_train, batch_size=batch_size, seed=seed)

    #from itertools import izip
    #from itertools import zip_longest

    try:
      from itertools import izip
    except ImportError:  #python3.x
      izip = zip

    train_generator = zip(image_generator, mask_generator)


    model = simple_unet( img_rows, img_cols)
    model.load_weights('E:/Research/Codes/prostate_segmentation_u-net/data/weights.h5')

    model.summary()


    model_checkpoint = ModelCheckpoint(
        'E:/Research/Codes/prostate_segmentation_u-net/data/weights.h5', monitor='val_loss', save_best_only=True)



    lrate = LearningRateScheduler(step_decay)

    model.compile(  optimizer=Adam(), loss=dice_coef_loss, metrics=[dice_coef, 'binary_accuracy'])

    model.fit_generator(
                        train_generator, 
                        steps_per_epoch=n_imgs//batch_size,
                        epochs=1,
                        verbose=1,
                        shuffle=True,
                        validation_data=(X_val, y_val),
                        callbacks=[model_checkpoint,lrate],
                        use_multiprocessing=True
                        )


    score = model.evaluate(X_test, y_test, verbose=2)

    print()
    print('Test accuracy:', score[1])

import time

start = time.time()
keras_fit_generator(img_rows=96, img_cols=96, regenerate=True,
                   n_imgs=15*10**4, batch_size=32)

end = time.time()

print('Elapsed time:', round((end-start)/60, 2 ) )
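
In case it helps with diagnosing this: as far as I understand, with use_multiprocessing=True the object Keras hands to the worker processes is train_generator (the plain zip of the two flow() iterators). A quick way to check whether that object is picklable at all (a diagnostic sketch only, not part of the training code above) would be to drop something like this into keras_fit_generator right after train_generator is created:

import pickle

try:
    pickle.dumps(train_generator)
    print('train_generator is picklable')
except TypeError as exc:
    print('train_generator is NOT picklable:', exc)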

0 Answers:

No answers yet.