I'm trying to implement a U-Net with the tf.layers API for an image segmentation task. Below I provide, in order: the error message, my network definition, my training code, and my evaluation code.
I've been struggling with this for days and can't figure out how to proceed. I'd be very grateful if anyone can help!
To get to the core of the problem: when I restore the model, I get an error saying
FailedPreconditionError: Attempting to use uninitialized value prediction/Level1Encoding/conv1/conv2d/kernel
[[Node: prediction/Level1Encoding/conv1/conv2d/kernel/read = Identity[T=DT_FLOAT, _class=["loc:@prediction/Level1Encoding/conv1/conv2d/kernel"], _device="/job:localhost/replica:0/task:0/cpu:0"](prediction/Level1Encoding/conv1/conv2d/kernel)]]
Caused by op 'prediction/Level1Encoding/conv1/conv2d/kernel/read', defined at:
File "/Users/Karl/anaconda/lib/python3.6/site-packages/spyder/utils/ipython/start_kernel.py", line 231, in <module>
main()
File "/Users/Karl/anaconda/lib/python3.6/site-packages/spyder/utils/ipython/start_kernel.py", line 227, in main
kernel.start()
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/Users/Karl/anaconda/lib/python3.6/site-packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/Users/Karl/anaconda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2717, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2827, in run_ast_nodes
if self.run_code(code, result):
File "/Users/Karl/anaconda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-42-a9ecd95c66cb>", line 1, in <module>
runfile('/Users/Karl/Research/NNStuff/NewTumor/eval.py', wdir='/Users/Karl/Research/NNStuff/NewTumor')
File "/Users/Karl/anaconda/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 880, in runfile
execfile(filename, namespace)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 102, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/Users/Karl/Research/NNStuff/NewTumor/eval.py", line 58, in <module>
run_model()
File "/Users/Karl/Research/NNStuff/NewTumor/eval.py", line 42, in run_model
v_pred = uNet2D(X, BETA, KERNEL_SIZE, False)
File "/Users/Karl/Research/NNStuff/NewTumor/definitions.py", line 56, in uNet2D
conv1=tf.layers.conv2d(x,64,(KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 551, in conv2d
return layer.apply(inputs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 503, in apply
return self.__call__(inputs, *args, **kwargs)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 443, in __call__
self.build(input_shapes[0])
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 137, in build
dtype=self.dtype)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 383, in add_variable
trainable=trainable and self.trainable)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 1065, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 962, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 367, in get_variable
validate_shape=validate_shape, use_resource=use_resource)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 352, in _true_getter
use_resource=use_resource)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 725, in _get_single_variable
validate_shape=validate_shape)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variables.py", line 199, in __init__
expected_shape=expected_shape)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/variables.py", line 330, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1400, in identity
result = _op_def_lib.apply_op("Identity", input=input, name=name)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2630, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/Users/Karl/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1204, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value prediction/Level1Encoding/conv1/conv2d/kernel
[[Node: prediction/Level1Encoding/conv1/conv2d/kernel/read = Identity[T=DT_FLOAT, _class=["loc:@prediction/Level1Encoding/conv1/conv2d/kernel"], _device="/job:localhost/replica:0/task:0/cpu:0"](prediction/Level1Encoding/conv1/conv2d/kernel)]]
This corresponds to the first layer of my network.
The network is defined as follows:
def uNet2D(x, REGULARIZER, KERNEL_SIZE, IS_TRAINING):
    regularizer = tf.contrib.layers.l2_regularizer(scale=REGULARIZER)
    # L1 encode
    with tf.variable_scope('Level1Encoding'):
        with tf.variable_scope('conv1'):
            conv1 = tf.layers.conv2d(x, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv1 = tf.layers.batch_normalization(inputs=conv1, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv1 = tf.nn.relu(conv1)
        with tf.variable_scope('conv2'):
            conv2 = tf.layers.conv2d(conv1, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv2 = tf.layers.batch_normalization(inputs=conv2, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv2 = tf.nn.relu(conv2)
        with tf.variable_scope('conv3'):
            conv3 = tf.layers.conv2d(conv2, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv3 = tf.layers.batch_normalization(inputs=conv3, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv3 = tf.nn.relu(conv3)
        conv3mp = tf.layers.max_pooling2d(conv3, 2, 2, padding='same')

    with tf.variable_scope('Level2Encoding'):
        with tf.variable_scope('conv1'):
            conv4 = tf.layers.conv2d(conv3mp, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv4 = tf.layers.batch_normalization(inputs=conv4, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv4 = tf.nn.relu(conv4)
        with tf.variable_scope('conv2'):
            conv5 = tf.layers.conv2d(conv4, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv5 = tf.layers.batch_normalization(inputs=conv5, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv5 = tf.nn.relu(conv5)
        with tf.variable_scope('conv3'):
            conv6 = tf.layers.conv2d(conv5, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv6 = tf.layers.batch_normalization(inputs=conv6, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv6 = tf.nn.relu(conv6)
        conv6mp = tf.layers.max_pooling2d(conv6, 2, 2, padding='same')

    with tf.variable_scope('Level3Encoding'):
        with tf.variable_scope('conv1'):
            conv7 = tf.layers.conv2d(conv6mp, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv7 = tf.layers.batch_normalization(inputs=conv7, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv7 = tf.nn.relu(conv7)
        with tf.variable_scope('conv2'):
            conv8 = tf.layers.conv2d(conv7, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv8 = tf.layers.batch_normalization(inputs=conv8, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv8 = tf.nn.relu(conv8)
        with tf.variable_scope('conv3'):
            conv9 = tf.layers.conv2d(conv8, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv9 = tf.layers.batch_normalization(inputs=conv9, axis=-1, momentum=0.999, epsilon=0.001,
                                                  center=True, scale=True, training=IS_TRAINING)
            conv9 = tf.nn.relu(conv9)
        conv9mp = tf.layers.max_pooling2d(conv9, 2, 2, padding='same')

    with tf.variable_scope('Level4Encoding'):
        with tf.variable_scope('conv1'):
            conv10 = tf.layers.conv2d(conv9mp, 512, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv10 = tf.layers.batch_normalization(inputs=conv10, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv10 = tf.nn.relu(conv10)
        with tf.variable_scope('conv2'):
            conv11 = tf.layers.conv2d(conv10, 512, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv11 = tf.layers.batch_normalization(inputs=conv11, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv11 = tf.nn.relu(conv11)
        with tf.variable_scope('conv3'):
            conv12 = tf.layers.conv2d(conv11, 512, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv12 = tf.layers.batch_normalization(inputs=conv12, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv12 = tf.nn.relu(conv12)
        conv12mp = tf.layers.max_pooling2d(conv12, 2, 2, padding='same')

    with tf.variable_scope('Level5'):
        with tf.variable_scope('conv1'):
            conv13 = tf.layers.conv2d(conv12mp, 1024, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv13 = tf.layers.batch_normalization(inputs=conv13, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv13 = tf.nn.relu(conv13)
        with tf.variable_scope('conv2'):
            conv14 = tf.layers.conv2d(conv13, 1024, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv14 = tf.layers.batch_normalization(inputs=conv14, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv14 = tf.nn.relu(conv14)
        with tf.variable_scope('conv3'):
            conv15 = tf.layers.conv2d(conv14, 1024, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv15 = tf.layers.batch_normalization(inputs=conv15, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv15 = tf.nn.relu(conv15)
        conv15 = tf.layers.conv2d_transpose(conv15, 512, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, strides=(2, 2), padding='same')

    with tf.variable_scope('Level4Decoding'):
        inp = tf.concat([conv12, conv15], 3)
        with tf.variable_scope('conv1'):
            conv16 = tf.layers.conv2d(inp, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv16 = tf.layers.batch_normalization(inputs=conv16, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv16 = tf.nn.relu(conv16)
        with tf.variable_scope('conv2'):
            conv17 = tf.layers.conv2d(conv16, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv17 = tf.layers.batch_normalization(inputs=conv17, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv17 = tf.nn.relu(conv17)
        with tf.variable_scope('conv3'):
            conv18 = tf.layers.conv2d(conv17, 256, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv18 = tf.layers.batch_normalization(inputs=conv18, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv18 = tf.nn.relu(conv18)
        conv18 = tf.layers.conv2d_transpose(conv18, 256, (KERNEL_SIZE, KERNEL_SIZE), strides=(2, 2), kernel_regularizer=regularizer, padding='same')

    with tf.variable_scope('Level3Decoding'):
        inp = tf.concat([conv9, conv18], 3)
        with tf.variable_scope('conv1'):
            conv19 = tf.layers.conv2d(inp, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv19 = tf.layers.batch_normalization(inputs=conv19, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv19 = tf.nn.relu(conv19)
        with tf.variable_scope('conv2'):
            conv20 = tf.layers.conv2d(conv19, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv20 = tf.layers.batch_normalization(inputs=conv20, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv20 = tf.nn.relu(conv20)
        with tf.variable_scope('conv3'):
            conv21 = tf.layers.conv2d(conv20, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv21 = tf.layers.batch_normalization(inputs=conv21, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv21 = tf.nn.relu(conv21)
        conv21 = tf.layers.conv2d_transpose(conv21, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, strides=(2, 2), padding='same')

    with tf.variable_scope('Level2Decoding'):
        inp = tf.concat([conv6, conv21], 3)
        with tf.variable_scope('conv1'):
            conv22 = tf.layers.conv2d(inp, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv22 = tf.layers.batch_normalization(inputs=conv22, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv22 = tf.nn.relu(conv22)
        with tf.variable_scope('conv2'):
            conv23 = tf.layers.conv2d(conv22, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv23 = tf.layers.batch_normalization(inputs=conv23, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv23 = tf.nn.relu(conv23)
        with tf.variable_scope('conv3'):
            conv24 = tf.layers.conv2d(conv23, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv24 = tf.layers.batch_normalization(inputs=conv24, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv24 = tf.nn.relu(conv24)
        conv24 = tf.layers.conv2d_transpose(conv24, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, strides=(2, 2), padding='same')

    with tf.variable_scope('Level1Decoding'):
        inp = tf.concat([conv3, conv24], 3)
        with tf.variable_scope('conv1'):
            conv25 = tf.layers.conv2d(inp, 64, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv25 = tf.layers.batch_normalization(inputs=conv25, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv25 = tf.nn.relu(conv25)
        with tf.variable_scope('conv2'):
            conv26 = tf.layers.conv2d(conv25, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            # batch norm is applied to the conv output itself
            conv26 = tf.layers.batch_normalization(inputs=conv26, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv26 = tf.nn.relu(conv26)
        with tf.variable_scope('conv3'):
            conv27 = tf.layers.conv2d(conv26, 128, (KERNEL_SIZE, KERNEL_SIZE), kernel_regularizer=regularizer, padding='same')
            conv27 = tf.layers.batch_normalization(inputs=conv27, axis=-1, momentum=0.999, epsilon=0.001,
                                                   center=True, scale=True, training=IS_TRAINING)
            conv27 = tf.nn.relu(conv27)
        convOUT = tf.layers.conv2d(conv27, 1, (1, 1), kernel_regularizer=regularizer, padding='same')
    return convOUT
The code I use to train it is straightforward:
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import glob
from scipy.io import loadmat
from random import randint
from definitions import *

tf.reset_default_graph()

# HYPERPARAMS
LR = 1e-5
EPS = 1e-12
BETA = .1
BATCH_SIZE = 1
NUM_STEPS = 10  # number of iterations before we save
KERNEL_SIZE = 3

training = Dataset2D('/Users/Karl/Research/NNStuff/Tumor/Testing/')

X = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 5], name='X')  # input
Y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 1], name='Y')  # 'labels'


def run_model():
    GLOBAL_STEP = 0
    with tf.variable_scope('prediction') as scope:
        t_pred = uNet2D(X, BETA, KERNEL_SIZE, True)
        t_cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.reshape(Y, [-1]), logits=tf.reshape(t_pred, [-1])))
        scope.reuse_variables()
        v_pred = uNet2D(X, BETA, KERNEL_SIZE, False)
        v_cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.reshape(Y, [-1]), logits=tf.reshape(v_pred, [-1])))

    optimizer = tf.train.AdamOptimizer(learning_rate=LR).minimize(t_cost)

    with tf.name_scope("training"):
        tf.summary.scalar("training_cost", t_cost, collections=['training'])
    with tf.name_scope("validation"):
        tf.summary.scalar("validation_cost", v_cost, collections=['validation'])
        #tf.summary.image("VALIDATION_X", X, collections=['validation'])
        #tf.summary.image("VALIDATION_Y", Y, collections=['validation'])
        #tf.summary.image("VALIDATION_PRED", v_pred, collections=['validation'])

    saver = tf.train.Saver()

    with tf.Session() as sess:
        train_merge = tf.summary.merge_all(key='training')
        validation_merge = tf.summary.merge_all(key='validation')
        print('Beginning Session!')
        writer = tf.summary.FileWriter('./graphs', sess.graph)
        sess.run(tf.global_variables_initializer())
        print('Running Model!')
        while True:
            if GLOBAL_STEP % NUM_STEPS != 0:
                x, y = training.drawBatch(BATCH_SIZE)
                y = np.expand_dims(y, -1)
                _, c, summary = sess.run([optimizer, t_cost, train_merge], feed_dict={X: x, Y: y})
                print(c)
            else:
                x, y = training.drawBatch(BATCH_SIZE)
                y = np.expand_dims(y, -1)
                c, summary = sess.run([v_cost, validation_merge], feed_dict={X: x, Y: y})
                save_path = saver.save(sess, './TumorOUT/model')
                print('val')
                print(c)
            GLOBAL_STEP += 1


run_model()
And here is the code I'm using to restore and evaluate the model:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 20:03:35 2018

@author: Karl
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import glob
from scipy.io import loadmat
from random import randint
from definitions import *

tf.reset_default_graph()

# HYPERPARAMS
LR = 1e-5
EPS = 1e-12
BETA = .1
BATCH_SIZE = 1
NUM_STEPS = 10  # number of iterations before we save
KERNEL_SIZE = 3

training = Dataset2D('/Users/Karl/Research/NNStuff/Tumor/Testing/')

X = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 5], name='X')  # input
Y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 1], name='Y')  # 'labels'


def run_model():
    GLOBAL_STEP = 0
    with tf.variable_scope('prediction') as scope:
        v_pred = uNet2D(X, BETA, KERNEL_SIZE, False)
        v_cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.reshape(Y, [-1]), logits=tf.reshape(v_pred, [-1])))

    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph('/Users/Karl/Research/NNStuff/NewTumor/TumorOUT/model.meta')
        new_saver.restore(sess, tf.train.latest_checkpoint('/Users/Karl/Research/NNStuff/NewTumor/TumorOUT/'))
        print('Beginning Session!')
        print('Running Model!')
        while True:
            x, y = training.drawBatch(BATCH_SIZE)
            y = np.expand_dims(y, -1)
            c = sess.run([v_cost], feed_dict={X: x, Y: y})
            print('val')
            print(c)


run_model()
Answer (score 0):
import_meta_graph creates new ops corresponding to the saved graph, and the Saver it returns will restore only those variables. So you either want to use the variables from the MetaGraph (you can look them up by name in the current Graph), or, if you want to use the variables/ops defined in Python, just load the checkpoint without loading the MetaGraph.
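A minimal sketch of the second option, reusing the placeholders, uNet2D call, and checkpoint path from the question; how the pieces are wired together here is my assumption, not a definitive implementation:

from __future__ import print_function
import tensorflow as tf
from definitions import *

tf.reset_default_graph()

BETA = .1
BATCH_SIZE = 1
KERNEL_SIZE = 3

X = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 5], name='X')
Y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 1], name='Y')

# Rebuild the graph in Python under the same variable scope used at training
# time, so the variable names ('prediction/Level1Encoding/conv1/...') match
# the names stored in the checkpoint.
with tf.variable_scope('prediction'):
    v_pred = uNet2D(X, BETA, KERNEL_SIZE, False)
v_cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.reshape(Y, [-1]), logits=tf.reshape(v_pred, [-1])))

# A Saver over the variables of *this* graph -- no import_meta_graph.
saver = tf.train.Saver()

with tf.Session() as sess:
    # restore() assigns every variable directly from the checkpoint, so no
    # global_variables_initializer() is needed for the restored variables.
    saver.restore(sess, tf.train.latest_checkpoint(
        '/Users/Karl/Research/NNStuff/NewTumor/TumorOUT/'))
    # ...evaluate as before: sess.run(v_cost, feed_dict={X: x, Y: y})

If you prefer the MetaGraph route instead, drop the uNet2D call from eval.py entirely, keep import_meta_graph plus restore, and fetch the saved tensors by name (for example tf.get_default_graph().get_tensor_by_name('X:0')). Either way the point is the same: don't build the network twice, once from the MetaGraph and once from Python, and then restore only one of the two copies.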