I have a simple question. The model itself works fine, but I am trying to reduce CPU core usage for a Keras CNN with the TensorFlow backend. I have tried several code examples (e.g. "Limit number of cores used in Keras"), but none of them work for me. As far as I can tell I have been through the TF and Keras docs, yet no combination of tf.Session(config=...) settings has any effect: CPU usage sits at around 95% every time. I have an old CPU (i7 X980, so no AVX support), which is why I use TF 1.5.1 and Keras 2.2.4, with Python 3.6 on Windows 7.
I suspect something is wrong in this part of the setup, but no combination of values works:
from keras import backend as K
import tensorflow as tf

config = tf.ConfigProto(intra_op_parallelism_threads=2,  # threads within a single op
                        inter_op_parallelism_threads=2,  # threads across independent ops
                        allow_soft_placement=True,
                        device_count={'CPU': 1})         # number of CPU devices, not cores
session = tf.Session(config=config)
K.set_session(session)
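I also tried setting the OpenMP thread variable before importing tensorflow, since it is only read at import time. As far as I know this only affects MKL-enabled builds of TF, and I am not sure the stock Windows wheel of TF 1.5.1 is one, so it may simply not apply here:

import os
# Must be set before tensorflow is imported; only honoured by MKL builds.
os.environ['OMP_NUM_THREADS'] = '2'
import tensorflow as tf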
My full code is below.
Thanks in advance for any advice and hints.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, Flatten, MaxPooling2D
from keras import optimizers
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras import backend as K
import tensorflow as tf

# Apply the thread limits before building the model, so Keras picks up
# this session instead of creating its own.
config = tf.ConfigProto(intra_op_parallelism_threads=2,
                        inter_op_parallelism_threads=2,
                        allow_soft_placement=True,
                        device_count={'CPU': 1})
session = tf.Session(config=config)
K.set_session(session)
model = Sequential()
# The exact architecture does not really matter here; input_shape and
# n_levels are defined elsewhere from my data.
activation = 'relu'
model.add(Conv2D(128, (3, 3), data_format='channels_last', input_shape=input_shape, padding='same', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (3, 3), data_format='channels_last', padding='same', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(16, (3, 3), data_format='channels_last', padding='same', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(8, (3, 3), data_format='channels_last', padding='same', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation=activation))
model.add(Dropout(0.2))
model.add(Dense(64, activation=activation))
model.add(Dropout(0.2))
model.add(Dense(n_levels, activation='softmax'))
adam = optimizers.Adam()
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=12, verbose=1, mode='auto', baseline=None, restore_best_weights=False)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='auto', cooldown=0, min_lr=0.00001)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam)
model.summary()
# xtr/ytr and xest/yest are my training and validation arrays, prepared elsewhere.
history = model.fit(xtr, ytr, batch_size=32, epochs=5, verbose=2,
                    validation_data=(xest, yest),
                    callbacks=[learning_rate_reduction, early_stopping])
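One OS-level workaround I am considering, assuming psutil is installed (pip install psutil): pin the whole Python process to two logical cores via cpu_affinity (supported on Windows and Linux). That should cap CPU usage no matter what TF does internally, but I would still like to understand why the ConfigProto route has no effect:

import psutil

# Restrict the current process (and all its threads) to logical cores 0 and 1.
p = psutil.Process()
p.cpu_affinity([0, 1])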