Regression: how to approximate a multi-dimensional function?

Date: 2018-11-23 20:43:23

Tags: python neural-network regression

I want to use a neural network to approximate a sinc function. Here is my code:

import numpy
import random

from keras.layers import Dense
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler


N = 10000

x1 = numpy.empty((N,))
x2 = numpy.empty((N,))
x3 = numpy.empty((N,))
y = numpy.empty((N,))


# three random inputs in [-10, 10]
for i in range(N):
    x1[i] = random.uniform(-10, 10)
    x2[i] = random.uniform(-10, 10)
    x3[i] = random.uniform(-10, 10)

z1 = x1 + x2 - x3
z2 = -x1 + x2 + x3

# 2D sinc target
for i in range(N):
    y[i] = (numpy.sin(z1[i])/z1[i])*(numpy.sin(z2[i])/z2[i])

y = y.reshape(-1, 1)

# MinMaxScaler expects 2D arrays, so the inputs are reshaped to (N, 1)
scaler = MinMaxScaler()

x1 = scaler.fit_transform(x1.reshape(-1, 1))
x2 = scaler.fit_transform(x2.reshape(-1, 1))
x3 = scaler.fit_transform(x3.reshape(-1, 1))

y = scaler.fit_transform(y)

model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(3,)))
model.add(Dense(1, activation='relu'))
model.compile(optimizer='adam', loss='mse')

# X is where I am stuck: it should somehow contain x1, x2 and x3
model.fit(X, y, epochs=50, verbose=1, batch_size=2)

I have seen similar code where X and Y are two one-dimensional arrays, but in my case I have to use three input values to compute y. What should X be in the code above for my case? In other words, how do I create X from x1, x2 and x3?
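My current guess, continuing from the code above, is to stack the three scaled columns side by side into a single (N, 3) matrix, but I am not sure this is the right way to do it:

# my guess: stack the three scaled (N, 1) columns into one (N, 3) matrix,
# so each row is one (x1, x2, x3) sample matching input_shape=(3,)
X = numpy.column_stack((x1, x2, x3))
model.fit(X, y, epochs=50, verbose=1, batch_size=2)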

1 Answer:

Answer 0 (score: 1)

Here is a small solution using TensorFlow:

#-*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import numpy.random as rd
import matplotlib.pyplot as plt

batch_size = 16


def generate_input(N=10000):
    # draw three inputs uniformly in [-10, 10] and combine them into z1, z2
    x = rd.uniform(low=-10, high=10, size=(3, N))
    z1 = x[0] + x[1] - x[2]
    z2 = -x[0] + x[1] + x[2]
    # the same (2, N, 1) array is repeated batch_size times to form one batch
    return [np.expand_dims(np.stack((z1, z2), axis=0), axis=2) for _ in range(batch_size)]


def target_func(data):
    # 2D sinc target: sin(z1)/z1 * sin(z2)/z2, evaluated for each batch element
    return [(np.sin(x[0, :]) / x[0, :]) * (np.sin(x[1, :]) / x[1, :]) for x in data]


def main():
    # placeholders: x holds the (z1, z2) pairs, y holds the sinc target
    x = tf.placeholder(tf.float32, shape=(batch_size, 2, None, 1), name='inputs')
    y = tf.placeholder(tf.float32, shape=(batch_size, None, 1), name='target')

    # sum z1 and z2 for each sample (shape becomes (batch_size, N, 1))
    # and feed the result through two fully connected hidden layers
    x_sum = tf.reduce_sum(x, axis=1)
    fc1 = tf.layers.dense(inputs=x_sum, units=32, activation=tf.nn.relu, name="fc1")
    fc2 = tf.layers.dense(inputs=fc1, units=16, activation=tf.nn.relu, name="fc2")
    output = tf.layers.dense(inputs=fc2, units=1, activation=None, name="output")
    predict = output

    # sum-of-squared-errors loss, minimized with Adam
    losses = tf.reduce_sum(tf.square(y - predict, name="loss"))

    train_step = tf.train.AdamOptimizer().minimize(losses)

    saver = tf.train.Saver()
    max_train_iter = 500
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(max_train_iter):
            inp = generate_input()
            target = target_func(inp)
            sess.run(train_step, feed_dict={x: inp, y: target})
            if i % (max_train_iter // 10) == 0:
                val_inp = generate_input()
                loss_val = sess.run(losses, feed_dict={x: val_inp, y: target_func(val_inp)})
                print('Step:%d, Loss:%f' % (i, loss_val))

        saver.save(sess, 'sin/model', global_step=i)
        test_inp = generate_input(50)
        truth = target_func(test_inp)
        pred = sess.run(predict, feed_dict={x: test_inp})
        plt.figure()
        rang = np.linspace(-10, 10, num=50)
        plt.plot(rang, truth[0], label='target')
        plt.plot(rang, pred[0], label='prediction')
        plt.legend()
        plt.savefig('sin/'+'graph_'+str(i)+'.png')


if __name__ == '__main__':
    main()

It does not produce great predictions, and you will have to play with the architecture and the hyperparameters to improve it, but it works. A test example after training looks like this:

[Figure: target vs. prediction on a test batch after training]
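As a concrete example of playing with the architecture, here is a minimal, self-contained Keras sketch of the same regression that also shows how X can be built directly from the three raw inputs. The layer sizes and training settings below are only a rough starting point, not tuned values:

# Minimal Keras sketch (layer sizes and hyperparameters are a rough guess, not tuned)
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

N = 10000
X = np.random.uniform(-10, 10, size=(N, 3))   # each row is one (x1, x2, x3) sample
z1 = X[:, 0] + X[:, 1] - X[:, 2]
z2 = -X[:, 0] + X[:, 1] + X[:, 2]
y = ((np.sin(z1) / z1) * (np.sin(z2) / z2)).reshape(-1, 1)   # 2D sinc target

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(3,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='linear'))      # linear output for regression
model.compile(optimizer='adam', loss='mse')

model.fit(X, y, epochs=50, batch_size=32, verbose=1)
print(model.predict(X[:5]))                   # quick sanity check on a few samples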