While using TensorFlow eager execution, I am having trouble using kernel_constraint=maxnorm(3) in Keras. The same constraint works fine with the standard Sequential approach outside of eager execution, but here it fails with an error (apparently caused by the in-place multiplication step *=, for which I don't know of a replacement in this setting).
Question: is there a way to apply a max $L^2$ norm constraint within TensorFlow's eager execution framework? Details follow.
Here is how I enable eager execution:
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from keras.datasets import cifar10
from keras.constraints import maxnorm  # used in the failing variant below
tf.enable_eager_execution()
The following code works:
class ObjectDet(tf.keras.Model):
    def __init__(self):
        super(ObjectDet, self).__init__()
        self.layer1 = tf.keras.layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding='same', activation='relu')
        self.layer2 = tf.keras.layers.Dropout(0.2)
        self.layer3 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')
        self.layer4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.layer5 = tf.keras.layers.Flatten()
        self.layer6 = tf.keras.layers.Dense(512, activation='relu')
        self.layer7 = tf.keras.layers.Dropout(0.1)
        self.layer8 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, input):
        """Run the model."""
        result = self.layer1(input)
        result = self.layer2(result)
        result = self.layer3(result)
        result = self.layer4(result)
        result = self.layer5(result)
        result = self.layer6(result)
        result = self.layer7(result)
        result = self.layer8(result)
        return result

# cross_entropy, model, optimizer, and train_ds are defined elsewhere (omitted here).
def loss(model, x, y):
    prediction = model(x)
    return cross_entropy(prediction, y)

def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    return tape.gradient(loss_value, model.variables)
x, y = iter(train_ds).next()
print("Initial loss: {:.3f}".format(loss(model, x, y)))

# Training loop
for (i, (x, y)) in enumerate(train_ds):
    # Calculate derivatives of the loss with respect to the model parameters.
    grads = grad(model, x, y)
    # Apply the gradients to the model.
    optimizer.apply_gradients(zip(grads, model.variables),
                              global_step=tf.train.get_or_create_global_step())
    if i % 200 == 0:
        print("Loss at step {:04d}: {:.3f}".format(i, loss(model, x, y)))
This doesn't work:
If I replace
self.layer1 = tf.keras.layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding='same', activation='relu')
with
self.layer1 = tf.keras.layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding='same', activation='relu', kernel_constraint=maxnorm(3))
I get the following error:
RuntimeErrorTraceback (most recent call last)
<ipython-input-74-629273c4a534> in <module>()
19
20 optimizer.apply_gradients(zip(grads, model.variables),
---> 21 global_step=tf.train.get_or_create_global_step())
22 if i % 200 == 0:
23 pass
/home/dgoldma1/.local/lib/python2.7/site-packages/tensorflow/python/training/optimizer.pyc in apply_gradients(self, grads_and_vars, global_step, name)
615 scope_name = var.op.name
616 with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
--> 617 update_ops.append(processor.update_op(self, grad))
618 if global_step is None:
619 apply_updates = self._finish(update_ops, name)
/home/dgoldma1/.local/lib/python2.7/site-packages/tensorflow/python/training/optimizer.pyc in update_op(self, optimizer, g)
166 if self._v.constraint is not None:
167 with ops.control_dependencies([update_op]):
--> 168 return self._v.assign(self._v.constraint(self._v))
169 else:
170 return update_op
/home/dgoldma1/.local/lib/python2.7/site-packages/keras/constraints.pyc in __call__(self, w)
51 norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
52 desired = K.clip(norms, 0, self.max_value)
---> 53 w *= (desired / (K.epsilon() + norms))
54 return w
55
/home/dgoldma1/.local/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.pyc in __imul__(self, unused_other)
931
932 def __imul__(self, unused_other):
--> 933 raise RuntimeError("Variable *= value not supported. Use "
934 "variable.assign_mul(value) to modify the variable "
935 "value and variable = variable * value to get a new "
RuntimeError: Variable *= value not supported. Use variable.assign_mul(value) to modify the variable value and variable = variable * value to get a new Tensor object.
Thanks!
Answer 0 (score: 1)
You seem to have found a bug in the code. You can file it with the development team here.
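If you do file it, a minimal reproduction helps. Judging from the traceback, the in-place multiply on a resource variable fails on its own under eager execution, so a sketch like this (untested against your exact versions) should trigger the same error:

v = tfe.Variable([1.0, 2.0])
v *= 0.5  # RuntimeError: Variable *= value not supported.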
Eager execution is a recent addition to TensorFlow with far-reaching effects on the codebase, so it still lacks some polish. I would not be surprised if bugs like this keep turning up in corner cases.
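Until it is fixed, one possible workaround (my own sketch, not an official API) is to avoid the standalone keras.constraints.maxnorm and use a constraint whose __call__ returns a new tensor instead of executing w *= ...; the optimizer then assigns the result back itself, as the update_op in your traceback shows. The class below reimplements the max-norm logic from the traceback with out-of-place ops; the name EagerMaxNorm and its defaults are mine. tf.keras.constraints.max_norm may already behave this way in your TensorFlow version, so it is worth trying first.

class EagerMaxNorm(tf.keras.constraints.Constraint):
    """Max-norm constraint without in-place variable updates."""
    def __init__(self, max_value=2, axis=0):
        self.max_value = max_value
        self.axis = axis

    def __call__(self, w):
        # Per-column L2 norms of the weight tensor.
        norms = tf.sqrt(tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True))
        # Clip norms that exceed max_value; smaller norms pass through unchanged.
        desired = tf.clip_by_value(norms, 0.0, self.max_value)
        # Return a rescaled copy instead of mutating w with *=.
        return w * (desired / (tf.keras.backend.epsilon() + norms))

Then pass kernel_constraint=EagerMaxNorm(3) in place of kernel_constraint=maxnorm(3).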