TensorFlow - retrieving a trained feedforward neural network's weights/biases after training

Time: 2017-12-05 19:06:35

Tags: python tensorflow neural-network keras data-science

I am currently trying to build a simple web app for interactive neural network training with Flask. What I am struggling with is retrieving the weights of the hidden layer after a feedforward neural network has been trained - my goal is to build an actual backend for TensorFlow's Playground.

Consider the following weight initializations:

# Weight initializations
tW1 = init_weights(shape=(n_features, hidden_nodes))
tW2 = init_weights(shape=(hidden_nodes, output_nodes))

How can I retrieve the computed weights of tW1 and tW2 once training is complete in TensorFlow?

Here is the full code example:

import numpy as np
import pandas as pd
import tensorflow as tf


def retrieve_data():
    """Retrieves the data - to be expanded for custom database access + S3 retrieval + URL"""

    result = pd.read_csv('snp_data.csv', parse_dates=['Date'], index_col=['Date'])
    return result

def get_columns(data, columns):
    """Selects the given columns by label (.ix is deprecated; use .loc)"""
    features = data.loc[:, columns]
    return features

def preprocess(data):
    """Data preprocessing"""
    result = (data - data.mean()) / data.std(ddof=0)
    result = result.fillna(0)
    return result

def init_weights(shape):
    """ Weights initialization """
    weights = tf.random_normal(shape=shape, stddev=0.1)
    return tf.Variable(weights)

def forwardprop(X, w_1, w_2):
    """Forward propagation"""
    h = tf.nn.relu(tf.matmul(X, w_1))
    y_hat = tf.matmul(h, w_2)
    return y_hat

# @app.route('/train')
def train():
    data = retrieve_data()

    train_x = get_columns(data, columns=['Open', 'Close'])
    train_x = preprocess(data=train_x).values.astype(np.float32)
    # Trim to a whole number of 32-row batches
    train_x = train_x[:(len(train_x) - (len(train_x) % 32))]

    train_y = get_columns(data, columns=['Adj Close']).values.astype(np.float32)
    train_y = train_y[:(len(train_y) - (len(train_y) % 32))]

    # Number of input nodes
    n_features = train_x.shape[1]

    # Number of output nodes
    output_nodes = train_y.shape[1]

    # Number of hidden nodes
    hidden_nodes = 20

    # TF Placeholders for the inputs and outputs
    tx = tf.placeholder(tf.float32, shape=(None, n_features))
    ty = tf.placeholder(tf.float32, shape=(None, output_nodes))

    # Weight initializations
    tW1 = init_weights(shape=(n_features, hidden_nodes))
    tW2 = init_weights(shape=(hidden_nodes, output_nodes))

    # Forward propagation
    y_hat = forwardprop(tx, tW1, tW2)

    # Backward Propagation
    tMSE = tf.reduce_mean(tf.square(y_hat - ty))
    learning_rate = 0.001
    tOptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    tOptimize = tOptimizer.minimize(tMSE)

    batch_size = 32
    n_epochs = 8

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for i_e in range(n_epochs):
            for i in range(0, train_x.shape[0], batch_size):
                batch_X = train_x[i:i + batch_size, ...]
                batch_y = train_y[i:i + batch_size]

                _, loss = sess.run([tOptimize, tMSE], feed_dict={tx: batch_X, ty: batch_y})
                print(i, loss)
    return 'Flask Dockerized'

1 Answer:

Answer 0 (score: 1):

This should be as simple as final_tW1, final_tW2 = sess.run([tW1, tW2]) once the for loop has finished. You don't need to feed anything, because the variables hold their own values, which do not depend on the placeholders.
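
For illustration, here is a minimal sketch of how the tail of the posted train() function could look with this fetch in place. Note that sess.run([tW1, tW2]) must run while the session is still open, i.e. inside the with tf.Session() block; once the block exits, the session is closed and the variable values can no longer be fetched. The jsonify return is a hypothetical addition (assuming from flask import jsonify) to show how the weights might be handed to a Playground-style frontend; it is not part of the original code:

    with tf.Session() as sess:
        sess.run(init)
        for i_e in range(n_epochs):
            for i in range(0, train_x.shape[0], batch_size):
                batch_X = train_x[i:i + batch_size, ...]
                batch_y = train_y[i:i + batch_size]
                _, loss = sess.run([tOptimize, tMSE], feed_dict={tx: batch_X, ty: batch_y})

        # Fetch the trained variables while the session is still open;
        # no feed_dict is needed because variables hold their own state.
        final_tW1, final_tW2 = sess.run([tW1, tW2])

    # final_tW1 and final_tW2 are now plain NumPy arrays of shape
    # (n_features, hidden_nodes) and (hidden_nodes, output_nodes).
    return jsonify({'W1': final_tW1.tolist(), 'W2': final_tW2.tolist()})

If the weights need to survive between Flask requests, tf.train.Saver (calling saver.save(sess, path) inside the with block) would persist the whole variable set to disk instead.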