However, it easily exhausts all of my GPU memory. Why is that?
import tensorflow as tf

# BATCH_SIZE, VOCAB_SIZE, EMBED_SIZE, NUM_SAMPLED, LEARNING_RATE,
# NUM_TRAIN_STEPS, SKIP_STEP and batch_gen are defined earlier (not shown).

with tf.name_scope('data'):
    center_words = tf.placeholder(tf.int32, shape=[BATCH_SIZE], name='center_words')
    target_words = tf.placeholder(tf.int32, shape=[BATCH_SIZE, 1], name='target_words')

with tf.name_scope("embedding_matrix"):
    embed_matrix = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBED_SIZE], -1.0, 1.0), name="embed_matrix")

with tf.name_scope("loss"):
    embed = tf.nn.embedding_lookup(embed_matrix, center_words, name="embed")
    nce_weight = tf.Variable(tf.truncated_normal([VOCAB_SIZE, EMBED_SIZE], stddev=1.0 / (EMBED_SIZE ** 0.5)), name="nce_weight")
    nce_bias = tf.Variable(tf.zeros([VOCAB_SIZE]), name="nce_bias")
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weight, biases=nce_bias, labels=target_words,
                       inputs=embed, num_sampled=NUM_SAMPLED, num_classes=VOCAB_SIZE),
        name="loss")

optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    total_loss = 0.0  # used to compute the average loss over the last SKIP_STEP steps
    writer = tf.summary.FileWriter('./graphs/no_frills/', sess.graph)
    for index in range(NUM_TRAIN_STEPS):
        centers, targets = next(batch_gen)
        loss_batch, _ = sess.run([loss, optimizer],
                                 feed_dict={center_words: centers, target_words: targets})
        total_loss += loss_batch
        if (index + 1) % SKIP_STEP == 0:
            print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
            total_loss = 0.0
    writer.close()
Answer (score: 5)
This is TensorFlow's default behavior: it maps nearly all of the GPU's memory when the session starts. If you want to limit GPU memory allocation to only what is actually needed, specify it in the session config:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
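Applied to the training script in the question, the only change is where the session is created; a minimal sketch, with the rest of the loop left as-is:

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand instead of all at once

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    # ... training loop unchanged ...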
Alternatively, you can cap the fraction of total GPU memory the process is allowed to use:
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf.Session(config=config)
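Note the trade-off: allow_growth starts small and grows the allocation as needed (and, per the TensorFlow docs, never releases memory back, to avoid fragmentation), while per_process_gpu_memory_fraction reserves a fixed hard cap up front, which is more predictable when several processes share one GPU.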
It's not well documented, but if you want to do more, the message definitions (the GPUOptions protobuf) are the place to start.
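For example, the same GPUOptions message also lets you restrict which devices the process can see; a minimal sketch (the device list "0" is just an example value):

import tensorflow as tf

gpu_options = tf.GPUOptions(
    allow_growth=True,           # allocate GPU memory on demand
    visible_device_list="0")     # expose only the first GPU to this process
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))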