I'm trying to use TensorFlow's tf.estimator.LinearClassifier for a homework assignment. I've already used it to show that it performs poorly when the images are just shrunk and flattened, but I can't get it to work when I use a pre-trained classifier to extract the features it trains on. It gives me a "Tensor Tensor(...) is not an element of this graph" error (the exact tensor depends on which model you use). I've already tried the "clear_session()" (now tf.keras.backend.clear_session()) solution from previous posts here, and it doesn't work (I never even get a first successful run, so there's nothing to clear).
You'll see a lot of the things I've tried in the code below. I've added comments near the lines you can toggle and the errors they produce. To show that the rest of the code works without using the pre-trained classifier for feature extraction, swap the commented lines under "Pre-processing Steps" and it will run and train without errors.
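(For reference, this is roughly how those posts apply it; a minimal sketch assuming TF 1.x, with the reset done before rebuilding the pretrained extractor:)
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50

tf.keras.backend.clear_session()  # reset the default Keras graph/session state
featureExtractor = ResNet50(weights='imagenet', include_top=False)  # rebuild the extractor after the reset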
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import scipy.io as sio
tf.enable_eager_execution() #Leave on or turn off, same error results
%matplotlib inline
path='peppers.jpg' #pick any two images you have
path2='cat.jpg'
trainingSet=[path,path,path,path,path2]
trainingLabels=[0,0,0,0,1]
testSet=[path,path,path2,path,path]
testLabels=[0,0,1,0,0]
batchsize=1
AUTOTUNE = tf.data.experimental.AUTOTUNE
#~~~~~~~~
#from tensorflow.keras.applications.vgg16 import VGG16 as PreTrained
#from tensorflow.keras.preprocessing import image as imagePrep
#from tensorflow.keras.applications.vgg16 import preprocess_input as ModelPrep
#VGG16 gives Error: Tensor Tensor("MaxPool_4:0", shape=(?, ?, ?, 512), dtype=float32) is not an element of this graph.
from tensorflow.keras.applications.resnet50 import ResNet50 as PreTrained
from tensorflow.keras.preprocessing import image as imagePrep
from tensorflow.keras.applications.resnet50 import preprocess_input as ModelPrep
#ResNET gives Error: Tensor Tensor("Relu_48:0", shape=(?, ?, ?, 2048), dtype=float32) is not an element of this graph.
featureExtractor=PreTrained(weights='imagenet',include_top=False)
def preUnwrap(path): #Use this code to show the classifier works, no feature extraction
    raw_image = tf.read_file(path)
    decodedImage = tf.image.decode_jpeg(raw_image, channels=3)
    img=tf.image.resize_images(decodedImage,(74,113)) #Shrinking to a size so I can match length
    features=img #Unnecessary rename so I can again show I'm just using the image itself
    vectorized=tf.reshape(features,[-1])
    vectorized=tf.concat((vectorized,[0,0]),axis=0)# Just Trying to match the shape
    return vectorized
def preCNN(path):
    #Commented Out Code modified from https://keras.io/applications/
    #img_path = 'elephant.jpg'
    #img = imagePrep.load_img(path, target_size=(224, 224)) #Error: 'Tensor' object has no attribute 'read'
    #x = imagePrep.img_to_array(img)
    #x = np.expand_dims(img, axis=0) #Error: setting an array element with a sequence
    raw_image = tf.read_file(path)
    decodedImage = tf.image.decode_jpeg(raw_image, channels=3)
    img=tf.image.resize_images(decodedImage,(224,224))
    x = tf.expand_dims(img, axis=0)
    x = ModelPrep(x)
    features=featureExtractor.predict(x,steps=1) #Do not remove "steps", will kill kernel repeatedly
    vectorized=tf.reshape(features,[-1]) #I doubt I actually have to unwrap this
    return vectorized
def prob2CNNinputTrain():
    #Training set specific
    petSetPathDataSet=tf.data.Dataset.from_tensor_slices(trainingSet)
    imageLabels=trainingLabels
    #Pre-processing Steps. Swap these to show the classifier works
    #petTFimages=petSetPathDataSet.map(preUnwrap,num_parallel_calls=AUTOTUNE)
    petTFimages=petSetPathDataSet.map(preCNN,num_parallel_calls=AUTOTUNE)
    #Zip the data and prepare the iterator
    imageCount=len(imageLabels)
    labelDataSet=tf.data.Dataset.from_tensor_slices(imageLabels)
    pet_img_label=tf.data.Dataset.zip((petTFimages,labelDataSet))
    pet_img_label=pet_img_label.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=imageCount))
    pet_img_label=pet_img_label.batch(batchsize)
    pet_img_label=pet_img_label.prefetch(buffer_size=AUTOTUNE)
    iterator=pet_img_label.make_one_shot_iterator()
    image,label=iterator.get_next()
    return {'image':image},label
def prob2CNNinputTEST():
    #Test set specific
    petSetPathDataSet=tf.data.Dataset.from_tensor_slices(testSet)
    imageLabels=testLabels
    #Pre-processing Steps. Swap these to show the classifier works
    #petTFimages=petSetPathDataSet.map(preUnwrap,num_parallel_calls=AUTOTUNE)
    petTFimages=petSetPathDataSet.map(preCNN,num_parallel_calls=AUTOTUNE)
    #Zip the data and prepare the iterator
    imageCount=len(imageLabels)
    labelDataSet=tf.data.Dataset.from_tensor_slices(imageLabels)
    pet_img_label=tf.data.Dataset.zip((petTFimages,labelDataSet))
    pet_img_label=pet_img_label.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=imageCount))
    pet_img_label=pet_img_label.batch(batchsize)
    pet_img_label=pet_img_label.prefetch(buffer_size=AUTOTUNE)
    iterator=pet_img_label.make_one_shot_iterator()
    image,label=iterator.get_next()
    return {'image':image},label
def myMetrics(labels,predictions):
    metricsToAdd={'Mean Per Class Accuracy':tf.metrics.mean_per_class_accuracy(labels,predictions['class_ids'],num_classes=2)}
    return metricsToAdd
def CNN_Model_fn():
    feature_columns = [tf.feature_column.numeric_column(key="image",shape=7*7*512)]
    estimator=tf.estimator.LinearClassifier(feature_columns=feature_columns,model_dir='models/Pretrained',n_classes=2)
    estimator=tf.contrib.estimator.add_metrics(estimator, myMetrics)
    return estimator
#~~~~~~~~
imgCount=len(trainingLabels)
numSteps=15*imgCount//batchsize+1
CNN_Model_fn().train(prob2CNNinputTrain,steps=numSteps)
#tf.keras.backend.clear_session() #Can't even run when brand new, does nothing
#~~~~~~~~
imgCount=len(testLabels)
numSteps=imgCount//batchsize+1
metrics=CNN_Model_fn().evaluate(prob2CNNinputTEST,steps=numSteps)
metrics
Given how I've set this up to show the minimum amount of code that reproduces the problem, you should get 100% accuracy rather than an error message.
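(For completeness, here's how I'd check that result if it ran; a minimal sketch, assuming LinearClassifier.evaluate() returns its usual 'accuracy' entry alongside the custom metric added via add_metrics:)
metrics = CNN_Model_fn().evaluate(prob2CNNinputTEST, steps=numSteps)
print(metrics['accuracy'])                      # expecting 1.0 on this toy set
print(metrics.get('Mean Per Class Accuracy'))   # the custom metric from myMetrics, if present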
In other news... if Stack Overflow is meant to help people help each other with code, how does it not have something like a LaTeX-style wrapper for the part where it actually tells you to put in code? Slight redemption: the next page lets you do the formatting, but you should at least be able to do it on the previous page.
Edit: after you post, Stack Overflow colors the code for you. Sorry, first-time user surprise.