TensorFlow Keras multi-output prediction model

Date: 2019-11-14 13:58:36

Tags: python tensorflow machine-learning keras deep-learning

I am trying to use a convolutional neural network to predict multiple outputs for a single image. The images and their associated values currently live in a TensorFlow dataset whose elements have the form img, value_1, value_2, value_3, value_4, value_5, where every component is a tensor. I built a model with the following code:

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(5, activation='softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['mae'])

model.fit(train_ds, epochs=2, steps_per_epoch=STEPS_PER_EPOCH)

When I run this, I get the following error: ValueError: too many values to unpack (expected 3)
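I suspect this happens because model.fit unpacks each dataset element as (inputs, targets) or (inputs, targets, sample_weights), so a flat six-tuple is too many values. Below is a sketch of what I think the map function would need to return instead, packing the five values into one numeric label tensor (process_path_packed is just my own name, and I am assuming the filename fields actually parse as floats; I have not verified this):

def process_path_packed(file_path):
    # Hypothetical variant of process_path: return an (image, labels)
    # pair instead of a flat six-tuple.
    parts = tf.strings.split(file_path, '_')
    img = decode_img(tf.io.read_file(file_path))
    # Stack the five filename fields and parse them as floats, so each
    # batch yields a single (BATCH_SIZE, 5) label tensor.
    labels = tf.strings.to_number(
        tf.stack([tf.strings.split(parts[0], '/')[5],
                  parts[1], parts[2], parts[3],
                  tf.strings.split(parts[4], '.p')[0]]),
        out_type=tf.float32)
    return img, labels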

I also tried creating the dataset in the form img, (value_1, value_2, value_3, value_4, value_5), but that produces the following error: ValueError: Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), but instead got the following list of 5 arrays: [<tf.Tensor 'ExpandDims:0' shape=(None, 1) dtype=string>, <tf.Tensor 'ExpandDims_1:0' shape=(None, 1) dtype=string>, <tf.Tensor 'ExpandDims_2:0' shape=(None, 1) dtype=string>, <tf.Tensor 'ExpandDims_3... Presumably a tuple of five tensors is treated as five separate targets while my model has a single 5-unit output, and the error also shows that the parsed values are still string tensors.

How can I fix my model so that it can predict multiple values per image?

Edit: here is the full code:

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import cv2
import imutils
from scipy import misc
import random
import glob
import pathlib

import os
import matplotlib.pyplot as plt
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"  

import tensorflow as tf
from tensorflow.keras import datasets, layers, models

import tensorflow.keras.backend as K

AUTOTUNE = tf.data.experimental.AUTOTUNE

def decode_img(img):
  img = tf.image.decode_png(img, channels=3)
  img = tf.image.convert_image_dtype(img, tf.float32)
  # tf.image.resize expects (height, width)
  return tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])

def process_path(file_path):
    # Parse the five label fields out of the filename; note that these
    # are still string tensors at this point.
    parts = tf.strings.split(file_path, '_')
    file = tf.io.read_file(file_path)
    img = decode_img(file)
    return img, (tf.strings.split(parts[0], '/')[5], parts[1], parts[2], parts[3], tf.strings.split(parts[4], '.p')[0])


#for f in labeled_ds.take(1):
    #print(f[1])
    #print(tf.strings.split(f[1][4], '.p')[0])
    #print(tf.strings.split(f[1][0], '/')[5])

def prepare_for_training(ds, cache=False, shuffle_buffer_size=1000):
  if cache:
    if isinstance(cache, str):
      ds = ds.cache(cache)
    else:
      ds = ds.cache()

  ds = ds.shuffle(buffer_size=shuffle_buffer_size)
  ds = ds.repeat()
  ds = ds.batch(BATCH_SIZE)
  ds = ds.prefetch(buffer_size=AUTOTUNE)

  return ds

data_dir = pathlib.Path("/home/connor/capstone/train3/")
image_count = len(list(data_dir.glob('*.png')))
print(image_count)
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
BATCH_SIZE = 4
IMG_HEIGHT = 1024
IMG_WIDTH = 1024
STEPS_PER_EPOCH = int(np.ceil(image_count/BATCH_SIZE))

list_ds = tf.data.Dataset.list_files("/home/connor/capstone/train3/*.png")

labeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)
train_ds = prepare_for_training(labeled_ds)

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(5, activation='softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['mae'])

model.fit(train_ds, epochs=2, steps_per_epoch=STEPS_PER_EPOCH) 
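From reading the Keras docs, I also wonder whether softmax with sparse_categorical_crossentropy is even right here, since my five values are separate quantities rather than one class out of five. My best guess at an alternative is a functional-API model with five named outputs, but the names, losses, and wiring below are guesses I have not gotten working with my dataset:

inputs = tf.keras.Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3))
x = layers.Conv2D(32, (3, 3), activation='relu')(inputs)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, (3, 3), activation='relu')(x)
x = layers.Flatten()(x)
x = layers.Dense(64, activation='relu')(x)
# One linear regression head per value; a guess, not verified.
outputs = [layers.Dense(1, name='value_%d' % i)(x) for i in range(1, 6)]
multi_model = tf.keras.Model(inputs=inputs, outputs=outputs)
multi_model.compile(optimizer='adam', loss='mse', metrics=['mae'])

If I went this route, I assume the dataset would then have to yield the targets as a tuple of five numeric tensors (which seems consistent with the second error message above), rather than a single stacked tensor.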
