python3 TensorFlow check failed: new_num_elements == NumElements() (96 vs. 60000)

Date: 2018-12-03 07:07:14

Tags: python tensorflow

I am writing a basic neural network with TensorFlow (1.12.0) in Python (3.6.7).

Everything seems to work when run individually, but I get this error message when running model():

F ./tensorflow/core/framework/tensor.h:663] Check failed: new_num_elements == NumElements() (96 vs. 60000)
Aborted

As far as I can tell, I do not use the number 96 anywhere in my code.

I have included my code below:

model.py:

import numpy as np
import tensorflow as tf
from preprocessing import getDataset
from parameter import initializeParameters
from forward import forwardPropagation
from cost import computeCost

def model(numberOfIterations = 10000):

    """
        Architecture:
                                        W1, b1                       W2, b2                      W3, b3                      W4, b4
        X(numberOfFeatures, numberOfImages) ---> L1(25, numberOfImages) ---> L2(25, numberOfImages) ---> L3(10, numberOfImages) ---> H(10,numberOFImages)
                                            relu                        relu                        relu                        sigmoid

        W1(25, numberOfFeatures)
        b1(25,1)
        W2(25, 25)
        b2(25, 1)
        W3(10, 25)
        b3(10, 1)
        W4(10, 10)
        b4(10, 1)

    """


    # get all dataset information
    data = getDataset()
    (numberOfFeatures, numberOfImages) = data["X_train"].get_shape()

    # initialize parameters whilst breaking symmetry
    parameters = initializeParameters(numberOfFeatures, numberOfImages)

    # construct forward propagation graph
    (hypothesis, Z4) = forwardPropagation(data["X_train"], parameters)

    # define cost tensor
    cost = computeCost(Z4, data["Y_train"])

    # create optimizer
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    # define session object
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # train network
        for iteration in range(numberOfIterations):
            sess.run(optimizer)

            if iteration % 10 == 0:
                print("After iteration", iteration, "cost =", sess.run(cost))


model(numberOfIterations = 50)

preprocessing.py:

import numpy as np
import tensorflow as tf
#import matplotlib.pyplot as plt

def getDataset(datasetInfo = True, exampleImage = False):

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    numberOfImages_train = np.shape(x_train)[0]
    numberOfImages_test  = np.shape(x_test)[0]

    imageDimension = (np.shape(x_train)[1], np.shape(x_train)[2])

    if datasetInfo:
        print("Number of training data images:", numberOfImages_train)
        print("Number of testing data images:", numberOfImages_test)
        print("Image dimensions:", imageDimension)

#    if exampleImage:
#        randIndex = np.random.randint(0, numberOfImages)
#        plt.imshow(X_mat[:,:,:, randIndex])


    data = {
        "X_train": tf.convert_to_tensor(np.reshape(x_train, (numberOfImages_train, -1)).T, dtype = tf.float64),
       "Y_train": tf.convert_to_tensor(y_train.T),
        "X_test": tf.convert_to_tensor(np.reshape(x_test, (numberOfImages_test, -1)).T, dtype = tf.float64),
        "Y_test": tf.convert_to_tensor(y_test.T)
    }

    return data

parameter.py:

import numpy as np
import tensorflow as tf

def initializeParameters(numberOfFeatures, numberOfImages):

    W1 = tf.Variable(np.random.randn(25, numberOfFeatures) )
    b1 = tf.Variable(np.zeros((25,1)))
    W2 = tf.Variable(np.random.randn(25, 25))
    b2 = tf.Variable(np.zeros((25,1)))
    W3 = tf.Variable(np.random.randn(10, 25))
    b3 = tf.Variable(np.zeros((10,1)))
    W4 = tf.Variable(np.random.randn(10, 10))
    b4 = tf.Variable(np.zeros((10,1)))

    parameters = {
        "W1": W1,
        "b1": b1,
        "W2": W2,
        "b2": b2,
        "W3": W3,
        "b3": b3,
        "W4": W4,
        "b4": b4
    }

    return parameters

forward.py:

import numpy as np
import tensorflow as tf

def forwardPropagation(X, parameters):

    Z1 = tf.add( tf.matmul(parameters["W1"], X) , parameters["b1"])
    A1 = tf.nn.relu(Z1)

    Z2 = tf.add( tf.matmul(parameters["W2"],A1) , parameters["b2"])
    A2 = tf.nn.relu(Z2)

    Z3 = tf.add( tf.matmul(parameters["W3"],A2) , parameters["b3"])
    A3 = tf.nn.relu(Z3)

    Z4 = tf.add( tf.matmul(parameters["W4"],A3) , parameters["b4"])
    hypothesis = tf.sigmoid(Z4)

    return (hypothesis, Z4)

cost.py:

import numpy as np
import tensorflow as tf

def computeCost(hypothesis, labels):

    onehotlabels = tf.cast(tf.one_hot(labels, depth = 10, axis = 0), tf.float64)

    losses = tf.nn.sigmoid_cross_entropy_with_logits(labels = onehotlabels, logits = hypothesis)
    cost = tf.reduce_mean(losses)

    return cost

Full output:

Number of training data images: 60000
Number of testing data images: 10000
Image dimensions: (28, 28)
2018-12-03 12:35:50.912208: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2018-12-03 12:35:51.221523: F ./tensorflow/core/framework/tensor.h:663] Check failed: new_num_elements == NumElements() (96 vs. 60000)
Aborted

1 Answer:

Answer 0 (score: 1):

You are feeding a batch size of 60,000, which is very large. I reproduced the error, and it went away once I limited the number of training images to 255. TensorFlow has a rich set of classes and methods for handling exactly this kind of problem.
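For example, a quick way to confirm that the dataset size is the culprit is to slice the NumPy arrays inside getDataset() before they are wrapped in tensors. This is only a minimal sketch for verification, not the recommended fix; the 255 cut-off is just the limit used while reproducing the error:

# Minimal sketch: cap the number of training images before building tensors.
# The 255 cut-off is arbitrary; adjust or remove it as needed.
x_train = x_train[:255]
y_train = y_train[:255]
numberOfImages_train = np.shape(x_train)[0]

The better fix is to batch the data with tf.data, as described below.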

Make the following changes to your methods. Return only NumPy arrays from the getDataset() method:

def getDataset(datasetInfo = True, exampleImage = False):
    # body of the method
    data = {
        "X_train": np.reshape(x_train, (numberOfImages_train, -1)).astype(np.float64),
       "Y_train": y_train,
        "X_test": np.reshape(x_test, (numberOfImages_test, -1)).astype(np.float64),
        "Y_test": y_test
    }
    return data

Inside the model() method, make the following changes:

# Just to match the shape of X_train returned from getDataset method
(numberOfImages , numberOfFeatures ) = data["X_train"].shape
# Create datasets that yield BATCH_SIZE samples per batch for both the training and test cases
train_data =  tf.data.Dataset.from_tensor_slices((data["X_train"],data["Y_train"])).repeat().batch(BATCH_SIZE)
test_data =  tf.data.Dataset.from_tensor_slices((data["X_test"],data["Y_test"])).repeat().batch(BATCH_SIZE)

train_itr = train_data.make_one_shot_iterator()
x_train, y_train = train_itr.get_next()
x_train = tf.transpose(x_train)

Now feed these x_train and y_train tensors into the network:

# initialize parameters whilst breaking symmetry
parameters = initializeParameters(numberOfFeatures, numberOfImages)

# construct forward propagation graph
(hypothesis, Z4) = forwardPropagation(x_train, parameters)

# define cost tensor
cost = computeCost(Z4, y_train)

I have provided the complete code for the getDataset and model methods below:

def getDataset(datasetInfo = True, exampleImage = False):

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    numberOfImages_train = np.shape(x_train)[0]
    numberOfImages_test  = np.shape(x_test)[0]

    imageDimension = (np.shape(x_train)[1], np.shape(x_train)[2])

    if datasetInfo:
        print("Number of training data images:", numberOfImages_train)
        print("Number of testing data images:", numberOfImages_test)
        print("Image dimensions:", imageDimension)

#    if exampleImage:
#        randIndex = np.random.randint(0, numberOfImages)
#        plt.imshow(X_mat[:,:,:, randIndex])


    data = {
        "X_train": np.reshape(x_train, (numberOfImages_train, -1)).astype(np.float64),
       "Y_train": y_train,
        "X_test": np.reshape(x_test, (numberOfImages_test, -1)).astype(np.float64),
        "Y_test": y_test
    }

    return data

def model(numberOfIterations = 10000, BATCH_SIZE = 32):

    """
        Architecture:
                                        W1, b1                       W2, b2                      W3, b3                      W4, b4
        X(numberOfFeatures, numberOfImages) ---> L1(25, numberOfImages) ---> L2(25, numberOfImages) ---> L3(10, numberOfImages) ---> H(10,numberOFImages)
                                            relu                        relu                        relu                        sigmoid

        W1(25, numberOfFeatures)
        b1(25,1)
        W2(25, 25)
        b2(25, 1)
        W3(10, 25)
        b3(10, 1)
        W4(10, 10)
        b4(10, 1)

    """


    # get all dataset information
    data = getDataset()
    (numberOfImages , numberOfFeatures ) = data["X_train"].shape
    train_data =  tf.data.Dataset.from_tensor_slices((data["X_train"],data["Y_train"])).repeat().batch(BATCH_SIZE)
    test_data =  tf.data.Dataset.from_tensor_slices((data["X_test"],data["Y_test"])).repeat().batch(BATCH_SIZE)

    train_itr = train_data.make_one_shot_iterator()
    x_train, y_train = train_itr.get_next()
    x_train = tf.transpose(x_train)

    # initialize parameters whilst breaking symmetry
    parameters = initializeParameters(numberOfFeatures, numberOfImages)

    # construct forward propagation graph
    (hypothesis, Z4) = forwardPropagation(x_train, parameters)

    # define cost tensor
    cost = computeCost(Z4, y_train)

    # create optimizer
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    # define session object
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # train network
        for iteration in range(numberOfIterations):
            sess.run(optimizer)

            if iteration % 10 == 0:
                print("After iteration", iteration, "cost =", sess.run(cost))


model(numberOfIterations = 50)
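Note that test_data is created above but never consumed. As a follow-up, here is a sketch of how you might evaluate one test batch inside model(), reusing the same forward graph; the accuracy computation is my own addition and not part of the original code:

# Sketch: draw one batch from the (repeating) test dataset, run the same
# forward pass, and compare predicted classes against the labels.
test_itr = test_data.make_one_shot_iterator()
x_test_batch, y_test_batch = test_itr.get_next()
x_test_batch = tf.transpose(x_test_batch)

(test_hypothesis, _) = forwardPropagation(x_test_batch, parameters)
predictions = tf.argmax(test_hypothesis, axis = 0, output_type = tf.int32)
correct = tf.equal(predictions, tf.cast(y_test_batch, tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float64))

# Inside the session, after the training loop:
#     print("Test batch accuracy:", sess.run(accuracy))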