TensorFlow error: consider casting elements to a supported type

Time: 2019-04-30 11:02:54

Tags: python tensorflow scikit-image

I am simply loading images from a folder with scikit-image, but when I run the get_batches() function an error occurs. I have read a few blog posts, but the problem is still there and I don't know how to handle it.

The problem occurs at image = tf.cast(image, tf.string), inside the get_batches() function.
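
For reference, this is roughly what load_data() hands to get_batches() (the array shapes below are made up for illustration): a plain Python list of decoded numpy images with different heights and widths, plus a list of integer labels.

import numpy as np

# Roughly what load_data() returns: one decoded numpy array per photo,
# each with its own height and width (shapes made up here).
images = [np.zeros((375, 500, 3), dtype=np.uint8),
          np.zeros((280, 300, 3), dtype=np.uint8)]
labels = [0, 1]

# The error is raised on the first line of get_batches():
#   image = tf.cast(image, tf.string)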

Environment: Python 3.6, TensorFlow 1.12.0, scikit-image, matplotlib

I am new to this and don't know what is causing it. My code is below. If anyone has a good idea, please let me know in the comments section as well.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
import skimage
from skimage import io
import re

#find "cat" or "dog" from string
def find_word_in_string(directory, word, return_number=0):
    matchObj = re.search(word, directory, re.M|re.I)
    if matchObj:
        return return_number
    else:
        return -1


# Derive a label from the path: 0 if it contains "cat", 1 if it contains "dog", -1 otherwise.
def find_method(directory):
    if directory.find("cat") != -1:
        return 0
    if directory.find("dog") != -1:
        return 1
    return -1

# Save data to a .txt file.
def write_data_to_txt(data, path, authority): 
    string_data = "".join(str(s) for s in data) 
    with open(path, authority) as performance:
        performance.write(string_data)

def show_image(imglist):
    count = 0
    for i in imglist[:10]:
        count += 1
        print("----->",count)
        io.imshow(i)
        io.show()  # Sometimes the image is not displayed unless io.show() is called.

def load_data(data_dir):
    # Get all subdirectories of data_dir. Each represents a label.
    directories = [d for d in os.listdir(data_dir) 
                   if os.path.isdir(os.path.join(data_dir, d))]
    for d in os.listdir(data_dir):
        path = data_dir+ "\\"+ d
        if os.path.isfile(path):
            directories.append(path)
    labels = []
    images = []
    for f in directories:
        # Load an image from file; skimage returns a decoded numpy array.
        images.append(skimage.data.imread(f))
        label = find_method(f)
        if label != -1: 
            labels.append(label)
        else:
            print("No match!!")
    write_data_to_txt(labels,"G:\\DeepLearning\\CatOrDogDataset\\labels.txt","w+")
    return images, labels

def get_batches(image, label, resize_w, resize_h, batch_size, capacity):
    # transform the image list to tf.string
    # transform the label list to tf.int64

    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int64)
    queue = tf.train.slice_input_producer([image, label])
    label = queue[1]
    image_temp = tf.read_file(queue[0])
    image = tf.image.decode_jpeg(image_temp, channels = 3)
    #resize image 
    image = tf.image.resize_image_with_crop_or_pad(image, resize_w, resize_h)

    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label], batch_size = batch_size,
        num_threads = 64,capacity = capacity)
    images_batch = tf.cast(image_batch, tf.float32)
    labels_batch = tf.reshape(label_batch, [batch_size])
    return images_batch, labels_batch

train_data_dir = "G:\\DeepLearning\\CatOrDogDataset\\new_train"
images, labels = load_data(train_data_dir)
show_image(images)
train_images_batch, train_labels_batch = get_batches(images, labels, 64, 64, 32,32)
print("----->finished!")

1 Answer:

Answer 0 (score: 0)

You are casting the images to string. Consider tf.float32 or tf.int64 instead. I think tf.float32 is a good choice, since later in your code you batch them that way anyway:

images_batch = tf.cast(image_batch, tf.float32)

image inside get_batches is an array of file-name strings. You need to load each file and convert it into a tensor:

from keras.preprocessing import image
def to_tensor(img_path):
  img = image.load_img(img_path, target_size=(224, 224))  
  # target size should match the input of the model
  x = image.img_to_array(img)
  return np.expand_dims(x, axis=0)

list_of_tensors = [to_tensor(img_path) for img_path in image]
batches = np.vstack(list_of_tensors)
# now use the 4d tensor for the rest of processing
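
Follow-up sketch (not part of the original answer): if you would rather keep the queue-based pipeline in get_batches() unchanged, you can make the loading step return file paths instead of decoded arrays, so that tf.cast(image, tf.string) and tf.read_file receive strings as intended. The helper name load_paths_and_labels and the cat/dog labelling rule below are assumptions based on the file names in the question.

import os

def load_paths_and_labels(data_dir):
    # Collect image file paths and derive a 0/1 label from each file name.
    paths, labels = [], []
    for name in os.listdir(data_dir):
        path = os.path.join(data_dir, name)
        if not os.path.isfile(path):
            continue
        name_lower = name.lower()
        if "cat" in name_lower:
            label = 0
        elif "dog" in name_lower:
            label = 1
        else:
            continue  # skip files that are neither "cat" nor "dog"
        paths.append(path)
        labels.append(label)
    return paths, labels

# Usage with the original get_batches(), which then casts the strings to
# tf.string and reads/decodes each file inside the input queue:
# paths, labels = load_paths_and_labels(train_data_dir)
# train_images_batch, train_labels_batch = get_batches(paths, labels, 64, 64, 32, 32)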