TypeError:训练keras模型时无法腌制_thread.lock对象

时间:2019-06-04 16:04:35

标签: python tensorflow keras

我试图用keras模型训练一个unet,而在训练第一个时期时我得到了类型错误:

  

TypeError:无法腌制_thread.lock对象,请参见下面的错误消息

我已经尝试降级tensorflow并在 ... tensorflow \ contrib \ legacy_seq2seq \ python \ ops seq2seq.py 脚本中添加脚本类似的问题,但这并不能解决我的问题。

我的代码:

def train_network():
    conf = get_train_conf()
    epochs = conf['epochs']
    batch_size = conf['batch_size']

    train_path = get_data_path('train')
    validation_path = get_data_path('valid')
    print('data input paths are:\n {}\n and:\n {}'.format(train_path, 
    validation_path))

    print('counting samples..')
    print('number of train, validation samples are:')
    print(count_samples(train_path), count_samples(validation_path))

    # train data
    image_generator_train = 
    image.ImageDataGenerator(**conf['augmentation_args'])
    mask_generator_train = 
    image.ImageDataGenerator(**conf['augmentation_args'])
    train_generator = get_generator(batch_size, train_path + '/unmasked', 
    True, image_generator_train, color_mode='grayscale')
    train_generator_mask = get_generator(batch_size, train_path + '/masked', 
    True, mask_generator_train, color_mode=conf['mask_color_mode'])
    train_gen = zip(train_generator, train_generator_mask)
    num_train_samples = train_generator.n

    # validation data - don't augment
    image_generator_validation = image.ImageDataGenerator()
    mask_generator_validation = image.ImageDataGenerator()
    valid_generator = get_generator(batch_size, validation_path + '/unmasked', 
    True, image_generator_validation, color_mode='grayscale')
    valid_generator_mask = get_generator(batch_size, validation_path + 
    '/masked', True, mask_generator_validation, 
    color_mode=conf['mask_color_mode'])
    valid_gen = zip(valid_generator, valid_generator_mask)
    num_valid_samples = valid_generator.n

    steps_per_epoch, validation_steps = (conf['steps_per_epoch'], 
    conf['validation_steps'])
    print('{} rounds of training will take place'.format(epochs))
    print('{} training steps per round'.format(steps_per_epoch))
    print('{} validation steps per round'.format(validation_steps))
    print('(with a batch size of {})'.format(batch_size))
    print('{} training samples will be generated per epoch'.format(batch_size 
    * steps_per_epoch))
    print('{} validation samples will be generated per 
    epoch'.format(batch_size * validation_steps))
    answer = input("would you like to continue? y/n?\n").lower()
    if not answer.lower() in ('yes', 'y'):
        return

    model = None
    print(conf)
    if conf['train_from_checkpoint']:
        print('loading model from last checkpoint')
        model = 
    pw_evaluate.load_model_from_wts(pw_evaluate.get_latest_model_file_name())
    else:
        model = unet(classes=conf['num_classes'])
        #model = get_simple_unet()

    model.compile(
        optimizer=keras.optimizers.Adam(
        lr=conf['learning_rate']),
        loss=conf['loss_function'],
        metrics=conf['metrics'])

    #model.compile(optimizer=keras.optimizers.Adam(2e-4), 
    loss=pw_loss.dice_coef_loss, metrics=conf['metrics'])

    time_st = str(datetime.utcfromtimestamp(time.time()).strftime('%Y-%m-%d 
    %H:%M:%S'))
    weight_saver = ModelCheckpoint('weights.' + time_st + '.{epoch:02d}- 
    {val_loss:.2f}.h5',
        save_best_only=True,
        save_weights_only=True,
        verbose=1,
        period=conf['period'])

    hist = model.fit_generator(
        train_gen,
        validation_data=valid_gen,
        validation_steps=validation_steps,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[weight_saver],
        verbose=1,
        use_multiprocessing=True)

    plot_file_name = 'training_plot_' + time_st + '.pdf'
    print('finished training, plotting history to:')
    print(plot_file_name)
    figures = [loss_figure(hist), accuracy_figure(hist)]
    plot_history(plot_file_name, figures)

    log_file_name = 'log_results_' + time_st + '.txt'
    print('logging results to:')
    print(log_file_name)
    log_results(hist, log_file_name, conf)

完整的堆栈跟踪:

  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\threading.py", line 917, in _bootstrap_inner
    self.run()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\threading.py", line 865, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\utils\data_utils.py", line 666, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\utils\data_utils.py", line 661, in <lambda>
    initargs=(seqs, self.random_seed))
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 119, in Pool
    context=self.get_context())
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\pool.py", line 176, in __init__
    self._repopulate_pool()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\pool.py", line 241, in _repopulate_pool
    w.start()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects

Exception in thread Thread-29:
Traceback (most recent call last):
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\threading.py", line 917, in _bootstrap_inner
    self.run()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\threading.py", line 865, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\utils\data_utils.py", line 666, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\utils\data_utils.py", line 661, in <lambda>
    initargs=(seqs, self.random_seed))
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 119, in Pool
    context=self.get_context())
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\pool.py", line 176, in __init__
    self._repopulate_pool()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\pool.py", line 241, in _repopulate_pool
    w.start()
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects

Using TensorFlow backend.
Using TensorFlow backend.
2019-06-04 11:45:59.121653: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2019-06-04 11:45:59.122454: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
    exitcode = _main(fd)
Traceback (most recent call last):
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 115, in _main
  File "<string>", line 1, in <module>
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
    exitcode = _main(fd)
  File "C:\Users\esp13\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 115, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input

1 个答案:

答案 0 :(得分:0)

根据pickle _thread.lock方法中的use_multiprocessing=True参数指定了fit_generator()错误。可能的根本原因是使用了不是线程安全的生成器。因此,多重进程试图更改相同的数据。

通过设置use_multiprocessing=False禁用此功能有助于避免该错误。