ValueError: Shape mismatch: The shape of labels (received (320,)) should equal the shape of logits except for the last dimension (received (32, 2))

Asked: 2020-10-09 20:48:57

Tags: python numpy tensorflow machine-learning neural-network

I am a beginner and I am stuck at the testing step. My code trains fine and reaches about 91% accuracy, but when I try to test the model it throws an error: ValueError: Shape mismatch: The shape of labels (received (320,)) should equal the shape of logits except for the last dimension (received (32, 2)).
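A quick way to narrow down a shape mismatch like this is to print the shapes of the arrays handed to evaluate; a minimal check, assuming the variables defined in the code below:

print(test_inputs.shape)   # expected (N, 10) for the 10 input features
print(test_targets.shape)  # should be 1-D, (N,); anything 2-D will trip sparse_categorical_crossentropy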

#Part 1:

import numpy as np
from sklearn import preprocessing

raw_csv_data = np.loadtxt('Audiobooks_data.csv', delimiter = ',')
unscaled_input_all = raw_csv_data[:,1:-1]
targets_all = raw_csv_data[:,-1]

#Balancing the dataset.

num_one_targets = int(np.sum(targets_all))
zero_targets_counter = 0
indices_to_remove = []

for i in range(targets_all.shape[0]):
    if targets_all[i] == 0:
        zero_targets_counter += 1
        if zero_targets_counter > num_one_targets:
            indices_to_remove.append(i)
            
unscaled_inputs_after_balancing = np.delete(unscaled_input_all, indices_to_remove, axis=0)
unscaled_targets_after_balancing = np.delete(targets_all, indices_to_remove, axis=0)
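
For reference, the same balancing can be done with vectorized NumPy indexing instead of the counting loop; a minimal sketch that produces the same arrays, assuming targets_all holds only 0s and 1s:

one_indices = np.where(targets_all == 1)[0]
zero_indices = np.where(targets_all == 0)[0][:len(one_indices)]  # keep as many 0s as 1s
kept_indices = np.sort(np.concatenate((one_indices, zero_indices)))
unscaled_inputs_after_balancing = unscaled_input_all[kept_indices]
unscaled_targets_after_balancing = targets_all[kept_indices]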

scaled_inputs = preprocessing.scale(unscaled_inputs_after_balancing)

#shuffling the data after balancing.

shuffled_indices = np.arange(scaled_inputs.shape[0])
np.random.shuffle(shuffled_indices)

# Use the shuffled indices to shuffle the inputs and targets.
shuffled_inputs = scaled_inputs[shuffled_indices]
shuffled_targets = unscaled_targets_after_balancing[shuffled_indices]

#Split the dataset into train, validation, and test

samples_count = shuffled_inputs.shape[0]

# Count the samples in each subset, assuming we want 80-10-10 distribution of training, validation, and test.
train_samples_count = int(0.8 * samples_count)
validation_samples_count = int(0.1 * samples_count)
test_samples_count = samples_count - train_samples_count - validation_samples_count
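
A one-line sanity check on the split (the three counts must add up to the total):

assert train_samples_count + validation_samples_count + test_samples_count == samples_count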

train_inputs = shuffled_inputs[:train_samples_count]
train_targets = shuffled_targets[:train_samples_count]

validation_inputs = shuffled_inputs[train_samples_count:train_samples_count+validation_samples_count]
validation_targets = shuffled_targets[train_samples_count:train_samples_count+validation_samples_count]

test_inputs = shuffled_inputs[train_samples_count+validation_samples_count:]
test_targets = shuffled_inputs[train_samples_count+validation_samples_count:]

# Print the number of targets that are 1s, the total number of samples, and the proportion for training, validation, and test.
print(np.sum(train_targets), train_samples_count, np.sum(train_targets) / train_samples_count)
print(np.sum(validation_targets), validation_samples_count, np.sum(validation_targets) / validation_samples_count)
print(np.sum(test_targets), test_samples_count, np.sum(test_targets) / test_samples_count)

#Save the three datasets in *.npz.

np.savez('Audiobooks_data_train', inputs=train_inputs, targets=train_targets)
np.savez('Audiobooks_data_validation', inputs=validation_inputs, targets=validation_targets)
np.savez('Audiobooks_data_test', inputs=test_inputs, targets=test_targets)
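
A quick round-trip check after saving can catch shape problems before training ever starts; a minimal sketch:

with np.load('Audiobooks_data_test.npz') as check:
    # inputs should be (N, 10); targets should be 1-D, (N,)
    print(check['inputs'].shape, check['targets'].shape)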

#Part 2:

import numpy as np
import tensorflow as tf

#Let's create a temporary variable npz, where we will store each of the three Audiobooks datasets.
#Inputs must be floats and targets must be integers,
#because sparse_categorical_crossentropy expects integer class labels (it one-hot encodes them internally).

npz = np.load('Audiobooks_data_train.npz')
train_inputs, train_targets = npz['inputs'].astype(float), npz['targets'].astype(int)

npz = np.load('Audiobooks_data_validation.npz')
validation_inputs, validation_targets = npz['inputs'].astype(float), npz['targets'].astype(int)

npz = np.load('Audiobooks_data_test.npz')
test_inputs, test_targets = npz['inputs'].astype(float), npz['targets'].astype(int)

#Model
#Outline, optimizers, loss, early stopping and training

input_size = 10
output_size = 2
hidden_layer_size = 50

model = tf.keras.Sequential([
    tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'), # 1st hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation = 'relu'), # 2nd hidden layer
    tf.keras.layers.Dense(output_size, activation = 'softmax')     # output layer
])

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
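
For reference, the equivalent setup with one-hot encoded targets would use categorical_crossentropy instead; a sketch, assuming two classes:

# hypothetical alternative: one-hot encode the integer labels up front
train_targets_one_hot = tf.keras.utils.to_categorical(train_targets, num_classes=2)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])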

batch_size = 100
max_epochs = 100

# let's set patience=2, to be a bit tolerant against random validation loss increases
early_stopping = tf.keras.callbacks.EarlyStopping(patience=2)
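# Optional variant (an assumption, not in the original code): restore_best_weights=True
# rolls the model back to the weights from its best val_loss epoch when training stops.
# early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,
#                                                   restore_best_weights=True)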

model.fit(train_inputs,
          train_targets,
          batch_size = batch_size,
          epochs = max_epochs,
          # callbacks are utilities called at set points during training;
          # here EarlyStopping checks whether val_loss has stopped improving
          callbacks = [early_stopping],
          validation_data = (validation_inputs, validation_targets),
          verbose = 2
         )

# Test the model

test_loss, test_accuracy = model.evaluate(test_inputs, test_targets)

#ValueError: Shape mismatch: The shape of labels (received (320,)) should equal the shape of logits except for the last dimension (received (32, 2)).
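
Judging from the code above, the likely culprit is a typo in the Part 1 split: test_targets is sliced from shuffled_inputs instead of shuffled_targets, so the saved test labels have shape (N, 10) rather than (N,). sparse_categorical_crossentropy then flattens each 32-sample batch of those 10-column "labels" to (320,) while the logits are (32, 2), which is exactly the mismatch in the error. A minimal sketch of the fix:

# in Part 1, slice the test labels from the targets, not the inputs
test_targets = shuffled_targets[train_samples_count+validation_samples_count:]

# sanity check: sparse_categorical_crossentropy expects 1-D integer class ids
assert test_targets.ndim == 1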

0 Answers