FailedPreconditionError (see above for traceback): Attempting to use uninitialized value model0/W5

Time: 2018-06-14 08:12:33

Tags: python var init

When I run this code, I get the following error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value model0/W5

I have already executed the statement

sess.run(tf.global_variables_initializer())

so what is the problem?
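
For reference, in TensorFlow 1.x tf.global_variables_initializer() only builds an init op for the variables that already exist in the graph at the moment it is called; variables created afterwards are not covered. A minimal standalone sketch of that behaviour (illustrative names, separate from the script below):

import tensorflow as tf

v1 = tf.Variable(0.0, name = 'v1')
init_op = tf.global_variables_initializer()   # covers only v1
v2 = tf.Variable(0.0, name = 'v2')            # created after the init op was built

with tf.Session() as sess :
    sess.run(init_op)
    print(sess.run(v1))                       # works, v1 was initialized
    try :
        print(sess.run(v2))                   # v2 was never initialized
    except tf.errors.FailedPreconditionError :
        print("FailedPreconditionError: v2 is uninitialized")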

import os
import cv2
import numpy as np
import sklearn.preprocessing as skp
import tensorflow as tf

This is the part that converts the MNIST images to NumPy arrays:

if __name__ == "__main__" :

    Train_dir = "c:\\image\\MNIST\\trainingSet\\"
    Test_dir = "c:\\image\\MNIST\\testSet\\"

    train_folder_list = np.array(os.listdir(Train_dir))
    test_folder_list = np.array(os.listdir(Test_dir))

    integer_encoder = skp.LabelEncoder()
    integer_encoded = integer_encoder.fit_transform(train_folder_list).reshape(len(train_folder_list), 1)
    test_label_encoded = integer_encoder.fit_transform(test_folder_list).reshape(len(test_folder_list), 1)

    onehot_encoder = skp.OneHotEncoder(sparse = False)
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    test_onehot_encoded = onehot_encoder.fit_transform(test_label_encoded)

    train_image_set = []
    train_label_set = []

    test_image_set = []
    test_label_set = []

    for index in range(len(train_folder_list)) :

        path_train = os.path.join(Train_dir, train_folder_list[index]) + '\\'
        path_test = os.path.join(Test_dir, test_folder_list[index]) + '\\'

        train_image_list = np.array(os.listdir(path_train))
        test_image_list = np.array(os.listdir(path_test))

        for image in zip(train_image_list, test_image_list) :

            train_image_dir = os.path.join(path_train, image[0])
            test_image_dir = os.path.join(path_test, image[1])

            train_img = cv2.imread(train_image_dir, cv2.IMREAD_GRAYSCALE)
            test_img = cv2.imread(test_image_dir, cv2.IMREAD_GRAYSCALE)

            train_image_set.append([np.array(train_img)]) #-> 4dim, list
            train_label_set.append([np.array(onehot_encoded[index])])

            test_image_set.append([np.array(test_img)])
            test_label_set.append([np.array(test_onehot_encoded[index])])

    train_image_set = np.reshape(train_image_set, (-1, 784)) #->2dim, np.ndar
    train_label_set = np.reshape(train_label_set, (-1, 10))
    test_image_set = np.reshape(test_image_set, (-1, 784)) #->2dim, np.ndar
    test_label_set = np.reshape(test_label_set, (-1, 10))

    train_image_set = np.array(train_image_set).astype(np.float32)
    train_label_set = np.array(train_label_set).astype(np.float32)
    test_image_set = np.array(test_image_set).astype(np.float32)
    test_label_set = np.array(test_label_set).astype(np.float32)


print("\n--------------------Image transforming to numpy has completed--------------------\n")

Then the CNN part starts:

class CNN_model :

    def __init__(self, sess, name) :

        self.sess = sess
        self.name = name
        self._build_net()

    def _build_net(self) :

        with tf.variable_scope(self.name) :

            # flattened 28x28 grayscale images and one-hot digit labels
            self.X = tf.placeholder(tf.float32, shape = [None, 784])
            X_img = tf.reshape(self.X, [-1, 28, 28, 1])
            self.Y = tf.placeholder(tf.float32, shape = [None, 10])
            self.keep_prob = tf.placeholder(tf.float32)

            W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev = 0.01), name = 'Weight1')
            L1 = tf.nn.conv2d(X_img, W1, strides = [1, 1, 1, 1], padding = 'SAME')
            L1 = tf.nn.relu(L1)
            L1 = tf.nn.max_pool(L1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')

            W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01), name = 'Weight2')
            L2 = tf.nn.conv2d(L1, W2, strides = [1, 1, 1, 1], padding = 'SAME')
            L2 = tf.nn.relu(L2)
            L2 = tf.nn.max_pool(L2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')

            W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01), name = 'Weight3')
            L3 = tf.nn.conv2d(L2, W3, strides = [1, 1, 1, 1], padding = 'SAME')
            L3 = tf.nn.relu(L3)
            L3 = tf.nn.max_pool(L3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')

            # after three 2x2 max-pools the spatial size is 28 -> 14 -> 7 -> 4
            L3 = tf.reshape(L3, [-1, 4*4*128])

            W4 = tf.get_variable("W4", shape = [4*4*128, 625], initializer = tf.contrib.layers.xavier_initializer())
            b4 = tf.Variable(tf.random_normal([625]), name = 'bias4')
            L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
            L4 = tf.nn.dropout(L4, keep_prob = self.keep_prob)

            W5 = tf.get_variable("W5", shape = [625, 10], initializer = tf.contrib.layers.xavier_initializer())
            b5 = tf.Variable(tf.random_normal([10]), name = 'bias5')
            self.logits = tf.matmul(L4, W5) + b5
            #hypothesis = tf.nn.softmax(logits)

            self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.Y))
            #self.cost = tf.reduce_mean(-tf.reduce_sum(self.Y*tf.log(hypothesis), axis = 1))
            self.optimizer = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(self.cost)

            predict_correct = tf.equal(tf.argmax(tf.nn.softmax(self.logits), 1), tf.argmax(self.Y, 1))
            # number of correct predictions in the fed batch
            self.Accuracy = tf.reduce_sum(tf.cast(predict_correct, tf.float32))

    def train(self, X_data, Y_data, keep_prob = 0.7) :

        return self.sess.run([self.cost, self.optimizer], feed_dict = { self.X : X_data, self.Y : Y_data, self.keep_prob : keep_prob })

    def test(self, X_data, Y_data, keep_prob = 1.0) :

        return self.sess.run(self.Accuracy, feed_dict = { self.X : X_data, self.Y : Y_data, self.keep_prob : keep_prob })

    def predict(self, X_data, keep_prob = 1.0) :

        return self.sess.run(self.logits, feed_dict = { self.X : X_data, self.keep_prob : keep_prob })

tf.reset_default_graph()

sess = tf.Session()

sess.run(tf.global_variables_initializer())

total_epoch = 5
batch_size = 100
model_num = 3
model_list = []

train_data = train_image_set
train_label = train_label_set
test_data = test_image_set
test_label = test_label_set

for i in range(model_num) :

    model_list.append(CNN_model(sess, "model" + str(i)))

print("\n--------------------CNN_Learning has started--------------------\n")

for epoch in range(total_epoch) :

    avg_cost_list = np.zeros(len(model_list))
    total_batch = int(len(train_data)/ batch_size)

    for i in range(total_batch) :

        start = ((i+1)*batch_size) - batch_size
        end = (i+1)*batch_size

        batch_xs = train_data[start : end]
        batch_ys = train_label[start : end]

        for m_idx, m in enumerate(model_list) :

            c, _ = m.train(batch_xs, batch_ys)
            avg_cost_list[m_idx] += c/total_batch

    print("Epoch : ", epoch + 1, "Cost : ", avg_cost_list)

print("\n--------------------CNN_Learning has completed--------------------\n")

But I get this exception:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value model0/beta1_power
         [[Node: model0/beta1_power/read = Identity[T=DT_FLOAT, _class=["loc:@model0/Adam/Assign"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](model0/beta1_power)]]
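
For comparison, a hedged sketch (reusing the CNN_model class above purely for illustration) of the usual TensorFlow 1.x ordering: every model is built first, so that its weights and the Adam slot variables (such as model0/beta1_power from the traceback) already exist in the graph when the initializer op is created and run.

tf.reset_default_graph()
sess = tf.Session()

# Build the models first so that all of their variables exist in the graph ...
model_list = [CNN_model(sess, "model" + str(i)) for i in range(3)]

# ... and only then create and run the init op, which now covers those variables.
sess.run(tf.global_variables_initializer())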
