I am trying to use the following code to have a trained model output its predictions for a segmentation task.
import h5py
import tifffile as tiff
from cnn_functions import nikon_getfiles, get_image, run_models_on_directory, get_image_sizes, segment_nuclei, segment_cytoplasm, dice_jaccard_indices
from model_zoo import sparse_bn_feature_net_31x31 as cyto_fn
import os
import numpy as np
direc_name = "C:/Users/Zein/Documents/Neural_Networks/CNN/"
data_location = os.path.join(direc_name, 'RawImages')
cyto_location = os.path.join(direc_name, 'Cytoplasm')
mask_location = os.path.join(direc_name, 'Masks')
cyto_channel_names = ['phase']
trained_network_cyto_directory = "C:/Users/Zein/Documents/Neural_Networks/CNN/trained_networks/"
cyto_prefix = "2017-03-06_Kcells_all_31x31_bn_feature_net_31x31_
win_cyto = 15
image_size_x, image_size_y = get_image_sizes(data_location, cyto_channel_names)[0:2]
list_of_cyto_weights = []
for j in range(2):
    cyto_weights = os.path.join(trained_network_cyto_directory, cyto_prefix + str(j) + ".h5")
    list_of_cyto_weights += [cyto_weights]
cytoplasm_predictions = run_models_on_directory(data_location, cyto_channel_names, cyto_location, model_fn = cyto_fn,
                                                list_of_weights = list_of_cyto_weights, image_size_x = image_size_x, image_size_y = image_size_y,
                                                win_x = win_cyto, win_y = win_cyto, split = False)
# nuclear_masks is produced by an earlier segment_nuclei step (not shown here)
cytoplasm_masks = segment_cytoplasm(cytoplasm_predictions, nuclear_masks = nuclear_masks, mask_location = mask_location, smoothing = 1, num_iters = 120)
But I get the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-3ce4003728e5> in <module>()
1 cytoplasm_predictions = run_models_on_directory(data_location, cyto_channel_names, cyto_location, model_fn = cyto_fn,
2 list_of_weights = list_of_cyto_weights, image_size_x = image_size_x, image_size_y = image_size_y,
----> 3 win_x = win_cyto, win_y = win_cyto, split = False)
C:\Users\Zein\Documents\Neural_Networks\CNN\cnn_functions.py in run_models_on_directory(data_location, channel_names, output_location, model_fn, list_of_weights, n_features, image_size_x, image_size_y, win_x, win_y, std, split, process, save)
1480
1481 batch_input_shape = (1,len(channel_names),image_size_x+win_x, image_size_y+win_y)
-> 1482 model = model_fn(batch_input_shape = batch_input_shape, n_features = n_features, weights_path = list_of_weights[0])
1483 n_features = model.layers[-1].output_shape[1]
1484
C:\Users\Zein\Documents\Neural_Networks\CNN\model_zoo.py in sparse_bn_feature_net_31x31(batch_input_shape, n_features, reg, init, weights_path)
353 model.add(BatchNormalization(axis=1))
354 model.add(Activation('relu'))
--> 355 model.add(sparse_MaxPooling2D(pool_size=(2, 2), strides=(d, d)))
356 d *= 2
357 model.add(Conv2DTranspose(64,3, strides=d, kernel_initializer=init, padding='valid', kernel_regularizer=l2(reg)))
C:\Users\Zein\Anaconda3\envs\TF352\lib\site-packages\keras\models.py in add(self, layer)
464 output_shapes=[self.outputs[0]._keras_shape])
465 else:
--> 466 output_tensor = layer(self.outputs[0])
467 if isinstance(output_tensor, list):
468 raise TypeError('All layers in a Sequential model '
C:\Users\Zein\Anaconda3\envs\TF352\lib\site-packages\keras\engine\topology.py in __call__(self, inputs, **kwargs)
583
584 # Actually call the layer, collecting output(s), mask(s), and shape(s).
--> 585 output = self.call(inputs, **kwargs)
586 output_mask = self.compute_mask(inputs, previous_mask)
587
C:\Users\Zein\Documents\Neural_Networks\CNN\cnn_functions.py in call(self, x, mask)
1128 strides=self.strides,
1129 border_mode=self.border_mode,
-> 1130 dim_ordering=self.dim_ordering)
1131 return output
1132
C:\Users\Zein\Documents\Neural_Networks\CNN\cnn_functions.py in _pooling_function(self, inputs, pool_size, strides, border_mode, dim_ordering)
1121 def _pooling_function(self, inputs, pool_size, strides,
1122 border_mode, dim_ordering):
-> 1123 output = sparse_pool(inputs, pool_size = pool_size, stride = strides[0])
1124 return output
1125
C:\Users\Zein\Documents\Neural_Networks\CNN\cnn_functions.py in sparse_pool(input_image, stride, pool_size, mode)
252 for offset_x in range(stride):
253 for offset_y in range(stride):
--> 254 pooled_array +=[pool_2d(input_image[:, :, offset_x::stride, offset_y::stride], pool_size, stride = (1,1), mode = mode, pad = (0,0), ignore_border = True)]
255 counter += 1
256
C:\Users\Zein\Anaconda3\envs\TF352\lib\site-packages\theano\tensor\signal\pool.py in pool_2d(input, ws, ignore_border, stride, pad, mode, ds, st, padding)
127 pad = padding
128
--> 129 if input.ndim < 2:
130 raise NotImplementedError('pool_2d requires a dimension >= 2')
131 if ignore_border is None:
AttributeError: 'Tensor' object has no attribute 'ndim'
I am using Keras with the TensorFlow backend, but the pool_2d function comes from Theano. Is that the problem, or can Keras use both TF and Theano functionality in the same script? Or has this call simply been deprecated?
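For reference, this is how I checked which backend Keras is actually configured to use (a minimal check; ~/.keras/keras.json is the default config location and may differ on your setup):

# Minimal backend check -- keras.backend.backend() reports the active backend,
# and ~/.keras/keras.json is the default config file that selects it.
import json
import os
from keras import backend as K

print(K.backend())  # prints 'tensorflow' in my TF352 environment

config_path = os.path.expanduser("~/.keras/keras.json")
if os.path.exists(config_path):
    with open(config_path) as f:
        print(json.load(f).get("backend"))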