我已经创建了多层模型,现在我想用数百组数值训练它,以便它可以根据不同的输入预测输出。但是我应该如何提供这些输入?我现在尝试把数据放进嵌套数组中,并使用训练函数逐一传入输入和对应的输出。但是,模型似乎在第二轮训练时覆盖了之前学到的内容,只能正确预测第二个答案。也许是我没有理解这个概念?
import tensorflow as tf
import numpy as np
# Report the runtime environment before building anything.
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))

# Training data: a single batch containing two 4-element samples each.
x = np.array([[[10, 10, 30, 20],
               [20, 10, 20, 10]]])
y = np.array([[[10, 10, 100, 10],
               [100, 10, 10, 10]]])
class Model(object):
    """Three chained element-wise affine transforms with trainable weights.

    NOTE(review): tf.multiply is ELEMENT-WISE, not a matrix product, so the
    three "layers" compose into a single per-element affine map (a*x + c) —
    this is not a dense multi-layer network. Confirm whether tf.matmul was
    intended.
    """

    def __init__(self, x, y):
        # get random values.
        # Weight shapes are (len(x), len(x[0][0])) — with the module-level
        # data that is (1, 4).  Biases are shape (len(y),) == (1,) and rely
        # on broadcasting against the inputs.
        self.W = tf.Variable(tf.random.normal((len(x), len(x[0][0]))))
        self.b = tf.Variable(tf.random.normal((len(y),)))
        self.W1 = tf.Variable(tf.random.normal((len(x), len(x[0][0]))))
        self.b1 = tf.Variable(tf.random.normal((len(y),)))
        self.W2 = tf.Variable(tf.random.normal((len(x), len(x[0][0]))))
        self.b2 = tf.Variable(tf.random.normal((len(y),)))

    def __call__(self, x):
        # Forward pass: three element-wise multiply-and-add stages.
        out1 = tf.multiply(x, self.W) + self.b
        out2 = tf.multiply(out1, self.W1) + self.b1
        last_layer = tf.multiply(out2, self.W2) + self.b2
        # Input_Leyer = self.W * x + self.b
        return last_layer
def loss(predicted_y, desired_y):
    """Sum-of-squared-error between a prediction and its target."""
    squared_error = tf.square(tf.subtract(predicted_y, desired_y))
    return tf.reduce_sum(squared_error)
# One shared Adam optimizer, reused by every train() call.
optimizer = tf.optimizers.Adam(learning_rate=0.1)
# noinspection PyPep8Naming
def train(model, inputs, outputs):
    """Apply one Adam gradient step on (inputs, outputs); prints the loss."""
    trainables = [model.W, model.b, model.W1, model.b1, model.W2, model.b2]
    with tf.GradientTape() as tape:
        current_loss = loss(model(inputs), outputs)
    gradients = tape.gradient(current_loss, trainables)
    optimizer.apply_gradients(zip(gradients, trainables))
    print(current_loss)
model = Model(x, y)

# Train on BOTH samples in every step.  The original code ran two separate
# loops (5000 steps on sample 0, then 10000 steps on sample 1); the second
# loop minimised the loss for sample 1 only, so the variables drifted away
# from whatever fitted sample 0 — which is exactly why only the second
# answer came out right.  Feeding the whole batch x[0] / y[0] makes each
# gradient step reduce the combined loss over all samples at once;
# broadcasting handles the (2, 4) batch against the (1, 4) weights.
for _ in range(15000):
    train(model, x[0], y[0])
for _ in range(3):
    # input() returns strings; convert to float32 explicitly, otherwise
    # the forward pass receives a string array and fails.  float32 matches
    # the dtype of the tf.random.normal variables.
    InputX = np.array([
        [float(input()), float(input()), float(input()), float(input())],
    ], dtype=np.float32)
    #returning = tf.math.multiply(InputX, model.W, name=None )
    # Use the model's own forward pass so the biases (b, b1, b2) are
    # applied too — the original re-implemented the pass with the weights
    # only, producing different predictions than the ones trained for.
    returning = model(InputX)
    print("I predict:", returning)