TensorFlow: Polynomial Regression

Date: 2018-08-17 11:29:59

Tags: python-3.x tensorflow artificial-intelligence regression non-linear-regression

I am trying to fit a non-linear regression to the CSV data provided at this link: CSV Data

I want to use polynomial regression. The problem is that the result I get back from TensorFlow is "None". I can't find the problem; I think something is wrong with the model or the cost function. Can anyone help? Any help would be appreciated.

# importing modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import time

# defining the method for gathering data
# date_idx is the column number of date in the .CSV file
def read(filename, date_idx, date_parse, year, bucket=7):

    # the number of days in a year: 365
    days_in_year = 365

    # defining a dictionary for the frequency
    freq = {}
    # here we calculate how many buckets the year is split into
    # (bucket = 7, i.e. each bucket covers 7 days)
    # and initialize each bucket's frequency to zero
    for period in range(0, int(days_in_year / bucket)):
        freq[period] = 0

    # open the file in text mode ('r' means read)
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader)                   # skip the first row since it only contains headers
        for row in csvreader:
            if row[date_idx] == '':        # each row consists of many columns, but if the date
                continue                   # is unavailable there is no need to check the row
            t = time.strptime(row[date_idx], date_parse)    # converting to the input format
            if t.tm_year == year and t.tm_yday < (days_in_year-1):  # we want the data in specific year
                freq[int(t.tm_yday / bucket)] += 1          # finding the frequency
    return freq

# here I call the method to gather the data
freq = read(r'C:\My Files\Programming\Python\TensorFlow\CallCenter\311_Call_Center_Tracking_Data__Archived_.csv',
            0, '%m/%d/%Y', 2014)

# here we convert our dictionary into two Python lists
x_temp =[]
y_temp =[]
for key, value in freq.items():
    x_temp.append(key)
    y_temp.append(value)

x_data = np.asarray(x_temp)
y_data = np.asarray(y_temp)

# visualizing the data
plt.scatter(x_data,y_data)
plt.show()

# splitting the data by ratio into two groups: training and test
def split_dataset(x_dataset, y_dataset, ratio):
    arr = np.arange(x_dataset.size)
    np.random.shuffle(arr)
    num_train = int(ratio*x_dataset.size)
    x_train = x_dataset[arr[0:num_train]]
    y_train = y_dataset[arr[0:num_train]]
    x_test = x_dataset[arr[num_train:x_dataset.size]]
    y_test = y_dataset[arr[num_train:y_dataset.size]]
    return x_train,y_train,x_test,y_test

x_train, y_train, x_test, y_test = split_dataset(x_data,y_data, ratio=0.7)

# here we create placeholders for the input and output of the session
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# defining global variables
learning_rate = 0.01
training_epochs = 100
num_coeffs = 5

# adding regularization (for later use)
#reg_lambda = 0.

# defining the coefficients of the polynomial
w = tf.Variable([0.]*num_coeffs, name='parameter')

# defining the model
def model(X,w):
    terms = []
    for i in range(num_coeffs):
        term = tf.multiply(w[i], tf.pow(X, i))
        terms.append(term)
    return tf.add_n(terms)

y_model = model(X,w)

# defining the cost function
cost = tf.reduce_sum(tf.pow(Y-y_model,2))

# defining training method
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initializing all variables
init = tf.global_variables_initializer()

# running the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        training_cost = sess.run(train_op, feed_dict={X:x_train, Y:y_train})
        print(training_cost)

        final_cost = sess.run(cost,feed_dict={X: x_test, Y:y_test})
        print('Final cost = {}'.format(training_cost))

3 Answers:

Answer 0 (score: 0)

The problem is that training_cost = sess.run(train_op, feed_dict={X:x_train, Y:y_train}) does not return the training cost, because train_op is the operation that updates the parameters with gradient descent, not the operation that computes the cost function.

If you want to get the training cost, you should do the following:

_, training_cost = sess.run([train_op, cost], feed_dict={X:x_train, Y:y_train})

where cost is the operation you defined earlier as cost = tf.reduce_sum(tf.pow(Y-y_model,2)).
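
For reference, a minimal sketch of the training loop with that fix applied (names taken from the question's code; moving the test evaluation out of the loop and printing final_cost instead of training_cost are my own adjustments):

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        # train_op updates the weights, cost evaluates the squared error,
        # so running both returns the current training cost as the second value
        _, training_cost = sess.run([train_op, cost], feed_dict={X: x_train, Y: y_train})
        print('Training cost = {}'.format(training_cost))

    # evaluate the cost once on the held-out test data
    final_cost = sess.run(cost, feed_dict={X: x_test, Y: y_test})
    print('Final cost = {}'.format(final_cost))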

Answer 1 (score: 0)

I changed the code as follows:

# importing modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import time

# defining the method for gathering data
# date_idx is the column number of date in the .CSV file


def read(filename, date_idx, date_parse, year, bucket=7):

    # the number of days in a year: 365
    days_in_year = 365

    # defining a dictionary for the frequency
    freq = {}
    # here we calculate how many buckets the year is split into
    # (bucket = 7, i.e. each bucket covers 7 days)
    # and initialize each bucket's frequency to zero
    for period in range(0, int(days_in_year / bucket)):
        freq[period] = 0

    # open the file in text mode ('r' means read)
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader)                   # skip the first row since it only contains headers
        for row in csvreader:
            if row[date_idx] == '':        # each row consists of many columns, but if the date
                continue                   # is unavailable there is no need to check the row
            t = time.strptime(row[date_idx], date_parse)    # converting to the input format
            if t.tm_year == year and t.tm_yday < (days_in_year-1):  # we want the data in specific year
                freq[int(t.tm_yday / bucket)] += 1          # finding the frequency
    return freq

# here I call the method to gather the data
freq = read(r'C:\My Files\Programming\Python\TensorFlow\CallCenter\311_Call_Center_Tracking_Data__Archived_.csv',
            0, '%m/%d/%Y', 2014)

# here we convert our dictionary into two Python lists
x_temp =[]
y_temp =[]
for key, value in freq.items():
    x_temp.append(key)
    y_temp.append(value)

x_data = np.asarray(x_temp)
x_data = x_data.astype(float)
y_data = np.asarray(y_temp)
y_data = y_data.astype(float)

print(x_data)
print(y_data)

# visualizing the data
plt.scatter(x_data,y_data)
plt.show()

# splitting the data by ratio into two groups: training and test
def split_dataset(x_dataset, y_dataset, ratio):
    arr = np.arange(x_dataset.size)
    np.random.shuffle(arr)
    num_train = int(ratio*x_dataset.size)
    x_train = x_dataset[arr[0:num_train]]
    y_train = y_dataset[arr[0:num_train]]
    x_test = x_dataset[arr[num_train:x_dataset.size]]
    y_test = y_dataset[arr[num_train:y_dataset.size]]
    return x_train,y_train,x_test,y_test

x_train, y_train, x_test, y_test = split_dataset(x_data,y_data, ratio=0.7)
print(type(x_train[0]))
# here we create placeholders for the input and output of the session
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# defining global variables
learning_rate = 0.01
training_epochs = 100
num_coeffs = 5

# adding regularization (for later use)
#reg_lambda = 0.

# defining the coefficients of the polynomial
w = tf.Variable([0.]*num_coeffs, name='parameter')

# defining the model
def model(X,w):
    terms = []
    for i in range(num_coeffs):
        term = tf.multiply(w[i], tf.pow(X, i))
        terms.append(term)
    return tf.add_n(terms)

y_model = model(X,w)

# defining the cost function
cost = tf.reduce_sum(tf.pow(Y-y_model,2))

# defining training method
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initializing all variables
init = tf.global_variables_initializer()

# running the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        _, training_cost = sess.run([train_op, cost], feed_dict={X: x_train, Y: y_train})
        print('Training_cost = {}'.format(training_cost))

        final_cost = sess.run(cost,feed_dict={X: x_test, Y:y_test})
        print('Final cost = {}'.format(training_cost))

The results changed from "nan" to:

Training_cost = 11020688384.0
Final cost = 11020688384.0
Training_cost = 9.952021814670212e+34
Final cost = 9.952021814670212e+34
Training_cost = inf
Final cost = inf
Training_cost = inf
Final cost = inf
Training_cost = inf
Final cost = inf
Training_cost = nan
Final cost = nan
Training_cost = nan
Final cost = nan

I cast everything to float, since the multiplication requires both operands to be floats.
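
One likely reason the cost still climbs to inf and then nan (my reading, not stated in the answer): the inputs run up to roughly 51 weekly buckets, so the x**4 term reaches into the millions and gradient descent with learning_rate = 0.01 overshoots. A minimal sketch of one common remedy, scaling the inputs into [0, 1] and averaging the squared error instead of summing it (both changes are my additions, not part of the original code):

# scale the week index into [0, 1] so the polynomial terms stay small
x_max = x_train.max()
x_train_scaled = x_train / x_max
x_test_scaled = x_test / x_max

# mean squared error keeps the gradient magnitude independent of the dataset size
cost = tf.reduce_mean(tf.square(Y - y_model))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# then feed the scaled arrays during training, e.g.
# sess.run([train_op, cost], feed_dict={X: x_train_scaled, Y: y_train})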

Answer 2 (score: 0)

I just changed the code as follows. The model definition now works, but the results are still off and I need to tune it further. Thanks to @gcucurull, I was able to pull it off.

# importing modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import time

# defining the method for gathering data
# date_idx is the column number of date in the .CSV file


def read(filename, date_idx, date_parse, year, bucket=7):

    # the number of days in a year: 365
    days_in_year = 365

    # defining a dictionary for the frequency
    freq = {}
    # here we calculate how many buckets the year is split into
    # (bucket = 7, i.e. each bucket covers 7 days)
    # and initialize each bucket's frequency to zero
    for period in range(0, int(days_in_year / bucket)):
        freq[period] = 0

    # open the file in text mode ('r' means read)
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader)                   # skip the first row since it only contains headers
        for row in csvreader:
            if row[date_idx] == '':        # each row consists of many columns, but if the date
                continue                   # is unavailable there is no need to check the row
            t = time.strptime(row[date_idx], date_parse)    # converting to the input format
            if t.tm_year == year and t.tm_yday < (days_in_year-1):  # we want the data in specific year
                freq[int(t.tm_yday / bucket)] += 1          # finding the frequency
    return freq

# here I call the method to gather the data
freq = read(r'C:\My Files\Programming\Python\TensorFlow\CallCenter\311_Call_Center_Tracking_Data__Archived_.csv',
            0, '%m/%d/%Y', 2014)

# here we convert our dictionary into two Python lists
x_temp =[]
y_temp =[]
for key, value in freq.items():
    x_temp.append(key)
    y_temp.append(value)

x_data = np.asarray(x_temp)
x_data = x_data.astype(float)
y_data = np.asarray(y_temp)
y_data = y_data.astype(float)

print(x_data)
print(y_data)

# visualizing the data
#plt.scatter(x_data,y_data)
#plt.show()

# splitting the data by ratio into two groups: training and test
def split_dataset(x_dataset, y_dataset, ratio):
    arr = np.arange(x_dataset.size)
    np.random.shuffle(arr)
    num_train = int(ratio*x_dataset.size)
    x_train = x_dataset[arr[0:num_train]]
    y_train = y_dataset[arr[0:num_train]]
    x_test = x_dataset[arr[num_train:x_dataset.size]]
    y_test = y_dataset[arr[num_train:y_dataset.size]]
    return x_train,y_train,x_test,y_test

x_train, y_train, x_test, y_test = split_dataset(x_data,y_data, ratio=0.7)
print(type(x_train[0]))

print(x_train)

# defining global variables
learning_rate = 0.000001
training_epochs = 10000
num_coeffs = 5

# defining the coefficients of the polynomial
w = tf.Variable(
    tf.truncated_normal([num_coeffs,1], mean=0.0,stddev= 1.0, dtype=tf.float64))

# adding bias
b = tf.Variable(tf.zeros(1,dtype=tf.float64))

# predefining the model
def model(x, y):
    # this predicts the y based on the given weight
    temp = []
    for i in range(num_coeffs):
        temp.append(tf.add(w[i],tf.pow(x,i)))
    prediction = tf.add(tf.reduce_sum(temp),b)
    # this is the cost function
    errors =tf.square(y - prediction)
    return [prediction, errors]

# defining the model
y, cost = model(x_train, y_train)

# defining training method
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initializing all variables
init = tf.global_variables_initializer()

# running the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in list(range(training_epochs)):
        sess.run(optimizer)
        if epoch%1000 ==0:
            print('Training cost = \n',sess.run(cost))
    print('---------------------------------------------------------------------------------')
    print('---------------------------------------------------------------------------------')
    y_prediction, cost_prediction = model(x_test, y_test)
    print(sess.run(y_prediction))

print(y_test[-1])
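
A side note on the model function above (my observation, not part of the answer): tf.add(w[i], tf.pow(x, i)) adds the coefficient to x**i instead of multiplying, and tf.reduce_sum(temp) collapses the whole dataset into a single scalar prediction. A polynomial of the form w[0] + w[1]*x + ... + w[4]*x**4 that returns one prediction per data point, consistent with the model() used in the question, could look like this sketch:

def model(x, y):
    # sum_i w[i] * x**i, broadcast over all data points, then add the bias
    terms = [tf.multiply(w[i], tf.pow(x, float(i))) for i in range(num_coeffs)]
    prediction = tf.add(tf.add_n(terms), b)
    # mean squared error over the data points
    errors = tf.reduce_mean(tf.square(y - prediction))
    return [prediction, errors]

It returns the same [prediction, errors] pair, so it could be dropped in where the existing model() is called.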