I am using PyCharm Community Edition with Python 3.7. I have installed the TensorFlow machine learning package through Anaconda.
I am following Google's TensorFlow regression tutorial here, but the output is limited: only the numerical results show up in the output console, and only the plt figures show up in a separate window.
The data table mentioned in the linked tutorial, coded as:
# Prints the stored data table in a formatted fashion (ERROR)
df = pd.DataFrame(train_data, columns = column_names)
df.head()
does not seem to produce its own window, as it does in the link. How can I display this table in a new window, the same way the plt figures are displayed?
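From what I understand, wrapping the call in print() would only dump the first five rows as plain text into the run console, not into a window of its own:
print(df.head())  # plain-text output in the console only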
Here is my main.py:
# Importing the different libraries and packages
# Machine Learning is mainly executed by the TENSORFLOW LIBRARY
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
TFVersion = tf.__version__
newline = "\n"
print(newline)
print("Current version of TensorFlow: ", TFVersion)
# Downloading the Boston Housing Data Set - it is already present in the keras
# NOTE: This will be referred to as the "BHD"
boston_housing = keras.datasets.boston_housing
# Initializing the training + testing data and labels as per the information suggested in the BHD
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data()
# Shuffle the training set in order to assure randomness - this condition is required for any statistical analysis
order = np.argsort(np.random.random(train_labels.shape))
train_data = train_data[order]
train_labels = train_labels[order]
# Printing the training and testing data sets (the .shape member function gets the examples and feature frequency
# from the train_data vector instance)
print("Training set: {}".format(train_data.shape)) # 404 examples, 13 features
print("Testing set: {}".format(test_data.shape)) # 102 examples, 13 features
# Initializing the variables/attributes for the data-set
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
                'TAX', 'PTRATIO', 'B', 'LSTAT']
# Prints the stored data table in a formatted fashion (ERROR)
df = pd.DataFrame(train_data, columns = column_names)
df.head()
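# NOTE: the tutorial runs as a notebook, where the cell renders df.head() on its own;
# in a plain script the call just returns a DataFrame and nothing is displayed
# unless the result is passed to print() or shown some other way.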
# Display first 10 entries
print(train_labels[0:10])
# TEST DATA is *not* used when calculating the mean and std
# Normalize data - these are basically z-scores
mean = train_data.mean(axis = 0)
std = train_data.std(axis = 0)
train_data = (train_data - mean)/std
test_data = (test_data - mean)/std
print(train_data[0]) # First training sample, normalized
def build_model():
    model = keras.Sequential([
        keras.layers.Dense(64, activation = tf.nn.relu,
                           input_shape = (train_data.shape[1],)),
        keras.layers.Dense(64, activation = tf.nn.relu),
        keras.layers.Dense(1)
    ])
    optimizer = tf.train.RMSPropOptimizer(0.001)
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae'])
    return model
model = build_model()
model.summary()
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0: print('')
        print('.', end='')
EPOCHS = 500
# Store training stats
history = model.fit(train_data, train_labels, epochs=EPOCHS,
                    validation_split=0.2, verbose=0,
                    callbacks=[PrintDot()])
import matplotlib.pyplot as plt
plt.interactive(False)
def plot_history(history):
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [1000$]')
    plt.plot(history.epoch, np.array(history.history['mean_absolute_error']),
             label='Train Loss')
    plt.plot(history.epoch, np.array(history.history['val_mean_absolute_error']),
             label='Val loss')
    plt.legend()
    plt.ylim([0, 5])
plot_history(history)
[loss, mae] = model.evaluate(test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: ${:7.2f}".format(mae * 1000))
test_predictions = model.predict(test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [1000$]')
plt.ylabel('Predictions [1000$]')
plt.axis('equal')
plt.xlim(plt.xlim())
plt.ylim(plt.ylim())
_ = plt.plot([-100, 100], [-100, 100])
error = test_predictions - test_labels
plt.hist(error, bins=50)
plt.xlabel("Prediction Error [1000$]")
_ = plt.ylabel("Count")
plt.show()
# https://www.youtube.com/watch?v=voLSnXi4hAI