I have a question. I tried to run this code, but I get an error.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
import glob
import cv2
import numpy as np
def plot_figures(figures, nrows, ncols, labels=None):
    fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(12, 14))
    axs = axs.ravel()
    for index, title in zip(range(len(figures)), figures):
        axs[index].imshow(figures[title], plt.gray())
        if(labels != None):
            axs[index].set_title(labels[index])
        else:
            axs[index].set_title(title)
        axs[index].set_axis_off()
    plt.tight_layout()
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail; by default matplotlib sets min and max to the actual min and max values of the output
# plt_num: used to plot multiple different weight feature map sets on the same block; just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    # Here make sure to preprocess your image_input in a way your network expects
    # with size, normalization, etc. if needed
    # image_input =
    # Note: x should be the same name as your network's tensorflow data placeholder variable
    # If you get an error that tf_activation is not defined, it may be having trouble accessing the variable from inside a function
    activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15,15))
    for featuremap in range(featuremaps):
        plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
        plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        if activation_min != -1 & activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
my_images = sorted(glob.glob('./traffic_images/*.jpg'))
my_labels = np.array([1, 28, 17, 10, 33])
name_values = np.genfromtxt('signnames.csv', skip_header=1, dtype=[('myint','i8'), ('mysring','S55')], delimiter=',')
figures = {}
labels = {}
my_signs = []
index = 0
for my_image in my_images:
    img = cv2.cvtColor(cv2.imread(my_image), cv2.COLOR_BGR2RGB)
    my_signs.append(img)
    figures[index] = img
    labels[index] = name_values[my_labels[index]][1].decode('ascii')
    index += 1
plot_figures(figures, 5, 1, labels)
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
my_signs = np.array(my_signs)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    my_accuracy = evaluate(my_signs, my_labels)
    print("My Data Set Accuracy = {:.3f}".format(my_accuracy))
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
my_single_item_array = []
my_single_item_label_array = []
i = 0
for i in range(5):
    my_single_item_array.append(my_signs[i])
    my_single_item_label_array.append(my_labels[i])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "./lenet")
        my_accuracy = evaluate(my_single_item_array, my_single_item_label_array)
        print('Image {}'.format(i+1))
        print("Image Accuracy = {:.3f}".format(my_accuracy))
        print()
k_size = 5
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=k_size)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "./lenet")
    my_softmax_logits = sess.run(softmax_logits, feed_dict={x: my_signs})
    my_top_k = sess.run(top_k, feed_dict={x: my_signs})

    for i in range(5):
        figures = {}
        labels = {}
        figures[0] = my_signs[i]
        labels[0] = "Original"
        for j in range(k_size):
            labels[j+1] = 'Guess {} : ({:.0f}%)'.format(j+1, 100*my_top_k[0][i][j])
            figures[j+1] = X_valid[np.argwhere(y_valid == my_top_k[1][i][j])[0]].squeeze()
        plot_figures(figures, 1, 6, labels)

    ymax = figures[0].max()
    ymin = figures[0].min()
    outputFeatureMap(image_input=figures, tf_activation=softmax_logits, activation_min=ymin, activation_max=ymax, plt_num=k_size)
But I got this error message:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-22-58bf65741b75> in <module>()
122 ymax = figures[0].max()
123 ymin = figures[0].min()
--> 124 outputFeatureMap(image_input=figures, tf_activation=softmax_logits, activation_min=ymin, activation_max=ymax , plt_num=k_size)
125
126
<ipython-input-22-58bf65741b75> in outputFeatureMap(image_input, tf_activation, activation_min, activation_max, plt_num)
38 # Note: x should be the same name as your network's tensorflow data placeholder variable
39 # If you get an error tf_activation is not defined it maybe having trouble accessing the variable from inside a function
---> 40 activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
41 featuremaps = activation.shape[3]
42 plt.figure(plt_num, figsize=(15,15))
/home/carnd/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in eval(self, feed_dict, session)
573
574 """
--> 575 return _eval_using_default_session(self, feed_dict, self.graph, session)
576
577
/home/carnd/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _eval_using_default_session(tensors, feed_dict, graph, session)
3631 "the tensor's graph is different from the session's "
3632 "graph.")
-> 3633 return session.run(tensors, feed_dict)
3634
3635
/home/carnd/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
764 try:
765 result = self._run(None, fetches, feed_dict, options_ptr,
--> 766 run_metadata_ptr)
767 if run_metadata:
768 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/carnd/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
935 ' to a larger type (e.g. int64).')
936
--> 937 np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
938
939 if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
/home/carnd/anaconda3/envs/carnd-term1/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
480
481 """
--> 482 return array(a, dtype, copy=False, order=order)
483
484 def asanyarray(a, dtype=None, order=None):
TypeError: float() argument must be a string or a number, not 'dict'
I'm not sure whether the problem is tf_activation=softmax_logits or something else. Does anyone have any ideas?
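For reference, my understanding from the template comments is that outputFeatureMap expects a single preprocessed image (with its batch dimension kept) and a conv-layer activation tensor, rather than the figures dict and the softmax output. A minimal sketch of what I think is intended looks like this, where conv1 is only a placeholder name for one of the conv layer tensors in my graph (I don't actually have a variable with that name):

# Hypothetical usage sketch -- conv1 stands in for a conv-layer tensor from my own graph
test_image = my_signs[0:1].astype(np.float32)  # one image, batch dimension kept: shape (1, 32, 32, 3)
# (apply the same normalization used during training here, if any)
with tf.Session() as sess:
    saver.restore(sess, "./lenet")
    # plot the feature maps of that conv layer for the single test image
    outputFeatureMap(image_input=test_image, tf_activation=conv1, plt_num=1)

Is that roughly right, or is the dict/softmax combination not the real problem here?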