"Uninitialized value" after restoring a MonitoredTrainingSession

Time: 2019-03-08 08:10:11

Tags: tensorflow, restore

After trying to restore my MonitoredTrainingSession, I get the error Attempting to use uninitialized value decoder/b_d_03. How can I fix this? I want to restore my VariationalAutoencoder and use only the decoder (the last few lines of the code).
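
One way to narrow this down is to list exactly which variables the restored session still considers uninitialized; tf.report_uninitialized_variables() from the TF1 API does this. A minimal sketch (untested, assuming the graph-building code below has already run, with the checkpoint path elided as in the code below):

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('.../model.ckpt-60.meta')
    saver.restore(sess, '.../model.ckpt-60')
    # Print the names of all variables that restore() did not fill in.
    print(sess.run(tf.report_uninitialized_variables()))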

Feel free to modify the code below.

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import random
import sys
import cv2
from scipy.stats import norm
import time

def normalize(img, max_val=1):
    # Rescale img to [0, max_val] and add a channel dimension.
    a = np.shape(img)
    img = np.reshape(img, (a[0],a[1],1))
    return max_val*(img-np.amin(img))/(np.amax(img)-np.amin(img))

def loadDATA(url,prefix):
    # Load every image in url whose filename starts with prefix,
    # keep only the red channel, and reshape to 43x43x1.
    files = os.listdir(url)
    img = [np.reshape(np.dot(plt.imread(url+i),[1,0,0]),(43,43,1)) for i in files if(i.startswith(prefix))]
    img_name = [i for i in files if(i.startswith(prefix))]
    return img, img_name

print('load data...')
url = '...'
prefix = 'SPOT'
img, name = loadDATA(url,prefix)
img = [normalize(i) for i in img]
print('data loaded:', np.shape(img)[0], 'examples with dimension:', np.shape(img)[1],'x',np.shape(img)[2],'.')

#Layers
def conv2d(input, name, kshape, strides=[1,1,1,1], padding='SAME'):
    with tf.name_scope(name):
        W = tf.get_variable(name='w_'+name,
                            shape=kshape,
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        b = tf.get_variable(name='b_'+name,
                            shape=[kshape[3]],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        out = tf.nn.conv2d(input,W,strides=strides,padding=padding)
        out = tf.nn.bias_add(out,b)
        return out

def conv2dLEAKYRELU(input, name, kshape, strides=[1,1,1,1], padding='SAME'):
    with tf.name_scope(name):
        out = tf.nn.leaky_relu(conv2d(input,name,kshape,strides,padding))
        return out

def fullyConnected(input, name, output_size):
    with tf.name_scope(name):
        input_size = input.shape[1:]
        input_size = int(np.prod(input_size))
        W = tf.get_variable(name='w_'+name,
                            shape=[input_size, output_size],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        b = tf.get_variable(name='b_'+name,
                            shape=[output_size],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        input = tf.reshape(input, [-1, input_size])
        out = tf.add(tf.matmul(input,W),b)
        return out

def fullyConnectedRELU(input, name, output_size):
    with tf.name_scope(name):
        out = tf.nn.relu(fullyConnected(input,name,output_size))
        return out

def fullyConnectedLEAKYRELU(input, name, output_size):
    with tf.name_scope(name):
        out = tf.nn.leaky_relu(fullyConnected(input,name,output_size))
        return out

def fullyConnectedSIGMOID(input, name, output_size):
    with tf.name_scope(name):
        out = tf.nn.sigmoid(fullyConnected(input,name,output_size))
        return out

def fullyConnectedSOFTPLUS(input, name, output_size):
    with tf.name_scope(name):
        out = tf.nn.softplus(fullyConnected(input,name,output_size))
        return out

def deconv2d(input, name, kshape, n_outputs, strides=[1,1], padding='SAME'):
    with tf.name_scope(name):
        out = tf.contrib.layers.conv2d_transpose(input,
                                                 num_outputs=n_outputs,
                                                 kernel_size=kshape,
                                                 stride=strides,
                                                 padding=padding,
                                                 weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
                                                 biases_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                                                 activation_fn=None)
        return out

def deconv2dRELU(input, name, kshape, n_outputs, strides=[1,1], padding='SAME'):
    with tf.name_scope(name):
        out = tf.contrib.layers.conv2d_transpose(input,
                                                 num_outputs=n_outputs,
                                                 kernel_size=kshape,
                                                 stride=strides,
                                                 padding=padding,
                                                 weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
                                                 biases_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                                                 activation_fn=tf.nn.relu)
        return out
# End of layer definitions

def normalization01(input):
    # Min-max normalize a tensor to the range [0, 1].
    return tf.div(tf.subtract(input,tf.reduce_min(input)),tf.subtract(tf.reduce_max(input),tf.reduce_min(input)),name='img_norm')

def Encoder(data,name):
    with tf.name_scope(name):
        input = tf.reshape(data,[-1,43,43,1])
        input = normalization01(input)
        print(input)
        x = tf.layers.flatten(input)
        print(x)
        x = fullyConnectedRELU(x,name='e_01',output_size=500)
        print(x)
        x = fullyConnectedRELU(x,name='e_02',output_size=500)
        print(x)
        x = fullyConnectedRELU(x,name='e_03',output_size=200)
        print(x)
        x = fullyConnectedRELU(x,name='e_04',output_size=200)
        print(x)
        loc = fullyConnected(x,name='mean',output_size=2)
        scale = fullyConnectedSOFTPLUS(x,name='std',output_size=2)
        print(loc)
        print(scale)
        return tf.contrib.distributions.MultivariateNormalDiag(loc,scale,name='z')

def Decoder(z,name):
    print(z)
    with tf.name_scope(name):
        y = fullyConnectedRELU(z,name='d_01',output_size=200)
        print(y)
        y = fullyConnectedRELU(y,name='d_02',output_size=200)
        print(y)
        y = fullyConnectedRELU(y,name='d_03',output_size=500)
        print(y)
        y = fullyConnectedRELU(y,name='d_04',output_size=500)
        print(y)
        y = fullyConnectedLEAKYRELU(y,name='d_05',output_size=1849)
        print(y)
        y = tf.reshape(y,(-1,43,43))
        y = tf.contrib.distributions.Independent(tf.contrib.distributions.Bernoulli(y),2,name='output')
        print(y)
        return y

def Prior():
    loc = tf.zeros(2,name='loc')
    scale = tf.ones(2,name='scale')
    return tf.contrib.distributions.MultivariateNormalDiag(loc, scale,name='zz')

Encoder = tf.make_template('encoder', Encoder)
Decoder = tf.make_template('decoder', Decoder)

# Build the model
data = tf.placeholder(tf.float32, [None,43,43,1])
Prior = Prior()
posterior = Encoder(data, name='Enc')
code = posterior.sample()

# Define the loss function
likelihood = Decoder(code, name='Dec').log_prob(tf.reshape(data,(-1,43,43)))
divergence = tf.contrib.distributions.kl_divergence(posterior,Prior)
elbo = tf.reduce_mean(likelihood-divergence)
optimize = tf.train.AdamOptimizer(0.001).minimize(-elbo,global_step=tf.train.get_or_create_global_step())

# Training and visualization
samples = Decoder(Prior.sample(10), name='Dec').mean()
vector = tf.placeholder(tf.float32,[None,2], name='vector')
print(vector)
sam = Decoder(vector, name='Dec').mean()
train_img = img
test_img = img[:10]
test_samples = np.ones(1)
print('----------')
print(samples)
'''
with tf.train.MonitoredTrainingSession(checkpoint_dir='C:/Users/vcshwaf/Desktop/Studienarbeit/Bildvorverarbeitung/Update20190224/') as sess:
    for epoch in range(2):
        feed = {data: np.reshape(test_img,(-1,43,43,1))}
        test_elbo, test_codes, test_samples = sess.run([elbo, code, samples], feed)
        print('Epoch',epoch,'elbo',test_elbo)
        random.shuffle(train_img)
        for i in range(30):
            img_batch = np.concatenate([train_img[i:i+100]])
            feed = {data: np.reshape(img_batch,(-1,43,43,1))}
            sess.run(optimize,feed)

        if epoch%1==0:
            n=15
            figure=np.zeros((43*n,43*n))
            grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
            grid_y = norm.ppf(np.linspace(0.05,0.95, n))
            for i, yi in enumerate(grid_x):
                for j, xi in enumerate(grid_y):
                    z_sample=np.array([[xi,yi]])
                    feed = {vector: z_sample}
                    example = sess.run(sam, feed)
                    example = np.reshape(example,(43,43))
                    figure[i*43:(i+1)*43,
                           j*43:(j+1)*43]=example
            plt.figure(figsize=(10,10))
            plt.imshow(figure,cmap='gray')
            plt.show()
'''
with tf.Session() as sess:
    saver = tf.train.import_meta_graph('.../model.ckpt-60.meta')
    saver.restore(sess, '.../model.ckpt-60')
    n=15
    figure=np.zeros((43*n,43*n))
    grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
    grid_y = norm.ppf(np.linspace(0.05,0.95, n))
    for i, yi in enumerate(grid_x):
        for j, xi in enumerate(grid_y):
            z_sample=np.array([[xi,yi]])
            feed = {vector: z_sample}
            example = sess.run(sam, feed)
            example = np.reshape(example,(43,43))
            figure[i*43:(i+1)*43,
                   j*43:(j+1)*43]=example
    plt.figure(figsize=(10,10))
    plt.imshow(figure,cmap='gray')
    plt.show()
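
For comparison, a commonly suggested alternative is to restore the checkpoint directly into the variables the script above already builds, instead of calling import_meta_graph: importing the meta graph adds a second copy of every variable to the default graph, and restore() then fills only that imported copy, leaving the originals (which sam depends on) uninitialized. A minimal sketch of that pattern, untested, with the checkpoint path elided as above:

# tf.train.Saver() with no arguments targets all variables defined above,
# so no duplicate graph is created by importing the meta graph.
with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, '.../model.ckpt-60')
    example = sess.run(sam, {vector: np.array([[0.0, 0.0]])})
    plt.imshow(np.reshape(example, (43, 43)), cmap='gray')
    plt.show()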

0 Answers:

No answers