How to implement a matmul-based NN written in TF1 using TF2

Posted: 2019-09-21 11:51:32

Tags: tensorflow tensorflow2.0 tf.keras

I want to port a simple matmul-based neural network written in TF1 to TF2.

Here is the source. (Don't mind the Korean comments; it comes from a tutorial written in Korean.)

So I looked up "how to migrate from TF1 to TF2", and I learned that I have to remove the placeholders (see the sketch of the old pattern below).
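
For context, the TF1 pattern I'm moving away from looks roughly like this. This is my own reconstruction of the placeholder/Session style, not the tutorial's exact code, shown here only to illustrate what has to be removed:

import tensorflow as tf

# TF1-style graph: placeholders fed through a Session with feed_dict.
# This still runs in TF2 via the compat layer, but the migration guide
# recommends replacing it with eager tensors and functions.
tf.compat.v1.disable_eager_execution()

X = tf.compat.v1.placeholder(tf.float32, [None, 2])   # inputs
Y = tf.compat.v1.placeholder(tf.float32, [None, 3])   # one-hot targets
W = tf.Variable(tf.random.uniform([2, 3], -1., 1.))
logits = tf.matmul(X, W)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))
train_op = tf.compat.v1.train.AdamOptimizer(0.01).minimize(cost)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(train_op, feed_dict={X: [[0, 0], [1, 1]],
                                  Y: [[1, 0, 0], [0, 0, 1]]})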

Here is my full code:

import tensorflow as tf
import numpy as np

x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])

y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)

W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))

Layer1 = tf.matmul(x_data, W1) + b1
Layer1 = tf.nn.relu(Layer1)

model = tf.matmul(Layer1, W2) + b2

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_data, logits=model)
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

optimizer.minimize(loss=lambda: cost, var_list=[W1, W2, b1, b2])

I found "Issue with AdamOptimizer" and changed the loss to a lambda.

Then I get this error:

ValueError: No gradients provided for any variable: ['Variable:0', 'Variable:0', 'Variable:0', 'Variable:0'].

I don't know how to fix it.

So I'd like to know the correct way to implement this in TF2.

1 Answer:

Answer 0 (score: 0)

OK. I went through the official guide for eager execution and finally got it working. The key point is that the loss passed to optimizer.minimize must be a callable that recomputes the forward pass from the variables on every call; a lambda that just returns an already-computed tensor (as in the question) gives the optimizer nothing to differentiate, which is what the "No gradients provided" error means.

Here is the code:

import tensorflow as tf
import numpy as np

x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])

y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)

class Model(tf.keras.Model):
    def __init__(self):
        super(Model, self).__init__()
        self.W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
        self.W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
        self.b1 = tf.Variable(tf.zeros([10]))
        self.b2 = tf.Variable(tf.zeros([3]))
    @staticmethod
    def _calc_layer(x, w, b):
        # single affine layer: x @ w + b
        return tf.matmul(x, w) + b

    def __call__(self, x):
        # hidden layer with ReLU, then the output logits
        layer1 = tf.nn.relu(Model._calc_layer(x, self.W1, self.b1))
        return Model._calc_layer(layer1, self.W2, self.b2)

def cost(model, inputs, targets):
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                logits=model(inputs))
    )

model = Model()

def cost_tominimize():
    return cost(model, x_data, y_data)

optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

for i in range(100):
    optimizer.minimize(loss=cost_tominimize,
                        var_list=model.trainable_variables)
    #print(cost_tominimize().numpy())

#test
prediction = tf.argmax(model(x_data), 1)
target = tf.argmax(y_data, 1)
print("prediction : ", prediction.numpy())
print("real : ", target.numpy())

is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('accuracy: %.2f%%' % (accuracy * 100))
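
For reference, the same training loop can also be written with an explicit tf.GradientTape, which is the pattern the eager execution guide walks through. The important part is that the forward pass is recomputed inside the taped region, so gradients can flow back to the variables. A minimal sketch reusing the model, cost, and optimizer defined above:

for i in range(100):
    with tf.GradientTape() as tape:
        loss = cost(model, x_data, y_data)   # forward pass recorded on the tape
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))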

And here is a version that doesn't use a class:

import tensorflow as tf
import numpy as np

x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])

y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)

W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))

def calc_layer(x, w, b):
    return tf.matmul(x, w) + b

def model(x):
    layer1 = tf.nn.relu(calc_layer(x, W1, b1))
    return calc_layer(layer1, W2, b2)

def cost(model, inputs, targets):
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                logits=model(inputs))
    )

def cost_tominimize():
    return cost(model, x_data, y_data)

optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

for i in range(100):
    optimizer.minimize(loss=cost_tominimize,
                       var_list=[W1, W2, b1, b2])
    print(cost_tominimize().numpy())

#...and test part here...
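
Not part of the original answer, but the same 2-10-3 network can also be expressed with tf.keras layers, which manage the variables and the training loop for you. A minimal sketch using the same x_data and y_data as above:

# Hypothetical tf.keras version of the same network (my addition, not the answer's code)
keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(2,)),
    tf.keras.layers.Dense(3)  # raw logits; softmax is applied inside the loss
])
keras_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
keras_model.fit(x_data, y_data, epochs=100, verbose=0)
print("prediction : ", keras_model.predict(x_data).argmax(axis=1))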