```python
def train(self, iterations=1000, save_dir="saved_models"):
    # Remove previous save directory if there is one
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    # Make new save directory
    os.mkdir(save_dir)
    # Just a tf thing, to merge all summaries into one
    merged = tf.summary.merge_all()
    # Using Adam optimizer as mentioned in the paper
    optimizer = tf.train.AdamOptimizer()
    # This is the train operation for our objective
    train_op = optimizer.minimize(self.loss)
    # Operation to initialize all variables
    init = tf.global_variables_initializer()
    print("Begin training...")
    with self.sess as sess:
        # Initialize all variables
        sess.run(init)
        test_exists = self.test_data
        # Create summary writer for train
        train_writer = tf.summary.FileWriter(save_dir + "/train", sess.graph)
        # If we're using a test set, include another summary writer for that
        if test_exists:
            test_writer = tf.summary.FileWriter(save_dir + "/test", sess.graph)
            test_x, test_y = self.test_data(*self.test_args)
            test_feed = {self.input: test_x, self.target: test_y}
        # This is our training loop
        for i in tqdm(range(iterations)):
            # Use the data function we were passed to get a batch every iteration
            x, y = self.data(*self.args)
            # Create feed dictionary for the batch
            feed = {
                self.input: x,
                self.target: y
            }
            # Run the train op and calculate the train summary
            summary, _ = sess.run([merged, train_op], feed)
            # If we're testing, don't train on the test set, but do calculate its summary
            if test_exists:
                t_summary = sess.run(merged, test_feed)
                # Write test summary
                test_writer.add_summary(t_summary, i)
            # Write train summary for this step
            train_writer.add_summary(summary, i)
        # Save our trained model
        self.save()
```
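For context, `self.data` and `self.test_data` are the batch-loading callables stored on the network together with their argument tuples (`self.args` / `self.test_args`). As a rough sketch of what I assume such a function is supposed to return (fixed-size float arrays that match the input/target placeholders), something like this; the names and shapes are illustrative, not the repo's actual loader:

```python
import numpy as np

def get_batch(image_paths, batch_size=16, img_size=32, scale=2):
    # Illustrative only: return a (low-res, high-res) pair of float32 batches.
    lr = np.zeros((batch_size, img_size, img_size, 3), dtype=np.float32)
    hr = np.zeros((batch_size, img_size * scale, img_size * scale, 3), dtype=np.float32)
    # ... fill lr/hr by loading and cropping the images listed in image_paths ...
    return lr, hr
```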
File "train.py", line 18, in <module>
network.train(args.iterations,args.savedir)
File "/home/psoni/EDSR-Tensorflow/model.py", line 222, in train
t_summary = sess.run(merged,test_feed)
File "/home/psoni/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 895, in run
run_metadata_ptr)
File "/home/psoni/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1093, in _run
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
File "/home/psoni/anaconda2/lib/python2.7/site-packages/numpy/core/numeric.py", line 531, in asarray
return array(a, dtype, copy=False, order=order)
While training, this error message keeps popping up after a few epochs. After looking at previous posts, I think this is the kind of error where the offending data structure can be fixed by changing `dtype=`. It happens within the first ten epochs of training, so maybe it is a memory issue? I saw another user run into the same problem. Any and all help is greatly appreciated.
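Based on the dtype hypothesis, one thing I plan to check is whether the arrays coming out of my test batch function can actually be converted the way `session.run()` converts feed values. A minimal sketch of that check (here `network` is my EDSR instance, and the coercion to `float32` is only my assumption about the placeholder dtype):

```python
import numpy as np

# Same attributes that train() uses internally to build the test feed
test_x, test_y = network.test_data(*network.test_args)

for name, batch in (("test_x", test_x), ("test_y", test_y)):
    arr = np.asarray(batch)
    print(name, arr.dtype, arr.shape)
    # An object dtype here usually means the batch is ragged (e.g. images of
    # different sizes); in that case the conversion below fails the same way
    # it does inside session.run().
    np.asarray(batch, dtype=np.float32)  # assuming the placeholders are float32
```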