Implementing a LinearRegression model in TensorFlow

Time: 2018-08-02 17:45:36

Tags: python tensorflow machine-learning neural-network

I would like to implement a TensorFlow model for linear regression with hidden layers.

I have referred to these tutorials on linear regression with TensorFlow:

https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py

https://pythonprogramming.net/tensorflow-neural-network-session-machine-learning-tutorial/?completed=/tensorflow-deep-neural-network-machine-learning-tutorial/

Here is my code:

import tensorflow as tf


class LinearRegression:

    def __init__(self, x, y, data_type, learning_rate=0.01, epochs=100):
        """
        Constructor to initialize the instance variables

        :param x: independent variable X

        :param y: dependent variable y

        :param data_type: The data type of the dependent and independent variables

        :param learning_rate: The learning rate of the neural network. Default: 0.01

        :param epochs: Number of epochs. Default: 100
        """

        data_types = {
            "int8": tf.int8,
            "int16": tf.int16,
            "int32": tf.int32,
            "int64": tf.int64,
            "float16": tf.float16,
            "float32": tf.float32,
            "float64": tf.float64
        }

        # Assign independent and dependent variables
        self.__independent_variable = x
        self.__dependent_variable = y

        # Assign data-types, learning rate and epochs
        self.data_type = data_types[data_type]
        self.learning_rate = learning_rate
        self.epochs = epochs

        # hidden layers
        self.__hidden_layers = []

        # placeholders
        self.X = tf.placeholder(self.data_type)
        self.y = tf.placeholder(self.data_type)

        # cost function
        self.cost_function = None

        # optimizer
        self.optimizer = None

    def add_hidden_layer(self, weight_shape, activation_function="relu", bias=None):
        """
        This method will add a hidden layer to our neural net. Note that the last
        hidden layer you add acts as the output layer.

        :param weight_shape: The shape of the weight matrix, e.g. [n_inputs, n_nodes]

        :param activation_function: The activation function to be used. Supported: relu (default), sigmoid, tanh

        :param bias: The bias vector. Default: zeros of length weight_shape[1]

        :return: None
        """
        if bias is None:
            bias = tf.Variable(tf.zeros(weight_shape[1]))
        weight = tf.Variable(tf.random_uniform(weight_shape, minval=-1.0, maxval=1.0))

        # Count of existing hidden layers
        hidden_layer_count = len(self.__hidden_layers)

        # The first layer takes the placeholder X as input; every later layer
        # takes the output of the previous layer. Each layer computes
        # activation(input . weight + bias), i.e. the formula y = mx + b
        # followed by a non-linearity.
        if hidden_layer_count == 0:
            layer_input = self.X
        else:
            layer_input = self.__hidden_layers[hidden_layer_count - 1][0]

        activations = {"relu": tf.nn.relu, "sigmoid": tf.sigmoid, "tanh": tf.tanh}
        if activation_function not in activations:
            raise ValueError("Unsupported activation function: %s" % activation_function)
        layer = activations[activation_function](tf.matmul(layer_input, weight) + bias)

        self.__hidden_layers.append([layer, weight, bias])
        print("Added layer")

    def train(self, optimizer="adam"):
        """
        This method will train the model. Only two optimizers are supported:

        1. adam (default)
        2. gradient_descent

        :param optimizer: The optimizer used to minimize the cost function. Supported: adam (default), gradient_descent

        :return: The weight and bias of the last (output) layer
        """
        hidden_layer_count = len(self.__hidden_layers)

        # Mean squared error: sum((prediction - y)^2) / (2 * n_samples)
        self.cost_function = tf.reduce_sum(tf.pow(self.__hidden_layers[hidden_layer_count - 1][0] - self.y, 2)
                                           / (2 * self.__dependent_variable.shape[0]))
        if optimizer == "adam":
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost_function)
        else:
            self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.cost_function)

        # Initialize the global variables
        init = tf.global_variables_initializer()

        with tf.Session() as session:
            session.run(init)
            feed = {self.X: self.__independent_variable, self.y: self.__dependent_variable}
            for epoch in range(self.epochs):
                session.run(self.optimizer, feed_dict=feed)
                c = session.run(self.cost_function, feed_dict=feed)
                weight = session.run(self.__hidden_layers[hidden_layer_count - 1][1])
                bias = session.run(self.__hidden_layers[hidden_layer_count - 1][2])
                if epoch % 100 == 0:
                    print("Weight: ", weight, "Cost: ", c, "Bias: ", bias)
            return weight, bias
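
One thing I am already unsure about: the session is closed as soon as train() returns, so the trained variables are gone afterwards. My guess (untested, just a sketch of what I have in mind) is that I would have to keep the session alive on the instance instead of using a with block, roughly like this:

# Sketch of keeping the session open after training (my own guess,
# untested; self.session would be a new attribute of the class):
self.session = tf.Session()
self.session.run(init)
for epoch in range(self.epochs):
    self.session.run(self.optimizer, feed_dict={self.X: self.__independent_variable,
                                                self.y: self.__dependent_variable})
# Without the "with" block, the session (and the trained variables)
# stays available for predictions later; it would then need an explicit
# self.session.close() once the model is no longer needed.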

I have tested it with some sample input:

import numpy

# Dataset
train_X = numpy.asarray([[ 3.3  ],
       [ 4.4  ],
       [ 5.5  ],
       [ 6.71 ],
       [ 6.93 ],
       [ 4.168],
       [ 9.779],
       [ 6.182],
       [ 7.59 ],
       [ 2.167],
       [ 7.042],
       [10.791],
       [ 5.313],
       [ 7.997],
       [ 5.654],
       [ 9.27 ],
       [ 3.1  ]])
train_Y = numpy.asarray([[1.7  ],
       [2.76 ],
       [2.09 ],
       [3.19 ],
       [1.694],
       [1.573],
       [3.366],
       [2.596],
       [2.53 ],
       [1.221],
       [2.827],
       [3.465],
       [1.65 ],
       [2.904],
       [2.42 ],
       [2.94 ],
       [1.3  ]])

l = LinearRegression(train_X, train_Y, "float32", epochs=1000)
l.add_hidden_layer([1, 17])
l.add_hidden_layer([17, 1])
weight, bias = l.train(optimizer="adam")

I am getting output like this:

Weight:  [[ 0.7321346 ]
 [ 0.30761123]
 [-0.49645996]
 [ 0.8884759 ]
 [ 0.43539715]
 [ 0.6100311 ]
 [-0.21072604]
 [ 0.8822198 ]
 [-0.8709421 ]
 [ 0.1667776 ]
 [ 0.8926308 ]
 [-0.90756726]
 [-0.41226315]
 [ 0.21427989]
 [ 0.17000389]
 [-0.9373965 ]
 [ 0.16003537]] Cost:  7.7810783 Bias:  [-0.01]
Weight:  [[ 0.7321346 ]
 [ 0.30761123]
 [-0.5783508 ]
 [ 0.8262489 ]
 [ 0.43539715]
 [ 0.6100311 ]
 [-0.28867453]
 [ 0.77843475]
 [-0.8709421 ]
 [ 0.05801708]
 [ 0.78117526]
 [-0.90756726]
 [-0.41226315]
 [ 0.21427989]
 [ 0.17000389]
 [-0.983428  ]
 [ 0.16003537]] Cost:  0.119424164 Bias:  [-0.00681436]
Weight:  [[ 0.7321346 ]
 [ 0.30761123]
 [-0.5923004 ]
 [ 0.82624584]
 [ 0.43539715]
 [ 0.6100311 ]
 [-0.30400994]
 [ 0.76900154]
 [-0.8709421 ]
 [ 0.04737379]
 [ 0.7730713 ]
 [-0.90756726]
 [-0.41226315]
 [ 0.21427989]
 [ 0.17000389]
 [-1.0034373 ]
 [ 0.16003537]] Cost:  0.08574117 Bias:  [0.09325708]

I can clearly see that the cost function is decreasing, but since I don't have much experience with TensorFlow, I don't know whether my implementation is correct (I did my best!), nor how to implement prediction for a future array.
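
For the prediction part, this is roughly what I have in mind, assuming the session from train() is kept open as sketched above (a predict method I would add to the class myself, untested):

    # (inside class LinearRegression)
    def predict(self, new_x):
        """Sketch of a predict method (my own attempt, untested): feed new
        inputs through the graph and evaluate the last (output) layer."""
        output_layer = self.__hidden_layers[-1][0]
        return self.session.run(output_layer, feed_dict={self.X: new_x})

so that, for example, l.predict(numpy.asarray([[4.0], [8.5]])) would return the predicted y values. Is that the right direction? Could you help me or provide any guidance? Thanks in advance.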

0 Answers:

No answers yet