我正在使用 TensorFlow 训练自动编码器。由于我的输入通常噪声很大,我想先用 SciPy 的 ndimage.gaussian_filter1d
对数据做平滑,再根据平滑后的数据计算网络的损失。但当我把 TensorFlow 张量传入该函数时,得到了 ValueError: invalid axis
(见下文)。请问如何修复或绕过此问题?
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d
def run_trainer():
    """Build a fully-connected autoencoder on a noisy sine wave (failing repro).

    NOTE(review): this is the question's minimal reproduction — the two
    ``gaussian_filter1d`` calls below raise the reported
    ``ValueError: invalid axis`` because SciPy's ndimage routines operate on
    NumPy arrays, not on symbolic ``tf.Tensor`` objects.
    """
    tf.reset_default_graph()
    # Synthetic data: one period of a sine wave with uniform noise in [-0.1, 0.1].
    N_data = 1000
    noise = 0.2 * (np.random.rand(N_data) - 0.5)
    x = np.linspace(0, 2*np.pi, N_data)
    y = np.sin(x)
    y_noisy = (y + noise).reshape((1, -1))  # shape (1, N_data): a single-sample batch
    input = tf.placeholder(tf.float32, shape=[None, N_data])
    # Encoder (1000 -> 500 -> 250) followed by decoder (250 -> 500 -> 1000).
    fc1 = fully_connected(inputs=input, num_outputs=500, activation_fn=tf.nn.relu)
    fc2 = fully_connected(inputs=fc1, num_outputs=250, activation_fn=tf.nn.relu)
    fc3 = fully_connected(inputs=fc2, num_outputs=500, activation_fn=tf.nn.relu)
    output = fully_connected(inputs=fc3, num_outputs=1000, activation_fn=tf.nn.tanh)
    # BUG (the subject of the question): gaussian_filter1d is a SciPy routine.
    # It inspects ``input.ndim`` to validate its axis argument; on a symbolic
    # tf.Tensor that attribute is not a plain integer, so SciPy's internal
    # _check_axis raises "ValueError: invalid axis" at graph-build time.
    smooth_input = gaussian_filter1d(input, sigma=10)
    smooth_output = gaussian_filter1d(output, sigma=10)
    loss = tf.reduce_mean(tf.square(smooth_input - smooth_output))
    # Comment out this guy for working version
    # loss = tf.reduce_mean(tf.square(input - output))
    optimiser = tf.train.AdamOptimizer(2e-5)
    train_step = tf.contrib.training.create_train_op(loss, optimiser)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(int(1e3)):
            _, loss_train = sess.run(
                [train_step, loss],
                feed_dict={input: y_noisy}
            )
            if i % 100 == 0:
                print("step: %i \t loss: %.3e" % (i, loss_train))
        result = output.eval(feed_dict={input: y_noisy})
    # Overlay the noisy target and the network's reconstruction.
    plt.plot(x, y_noisy.flatten())
    plt.plot(x, result.flatten())
    plt.show()


if __name__ == "__main__":
    run_trainer()
Traceback (most recent call last):
File "/***/stack_TF_axis.py", line 60, in <module>
run_trainer()
File "/***/stack_TF_axis.py", line 27, in run_trainer
smooth_input = gaussian_filter1d(input, sigma=10)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/scipy/ndimage/filters.py", line 204, in gaussian_filter1d
return correlate1d(input, weights, axis, output, mode, cval, 0)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/scipy/ndimage/filters.py", line 86, in correlate1d
axis = _ni_support._check_axis(axis, input.ndim)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/scipy/ndimage/_ni_support.py", line 90, in _check_axis
raise ValueError('invalid axis')
ValueError: invalid axis
答案 0(得分:1)
基于 @LukeDeLuccia 的建议,我将此解决方案(见下方注释中的链接)改编为适用于一维张量的高斯滤波。作为参考,下面给出一个可运行的完整示例:
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import numpy as np
import matplotlib.pyplot as plt
# Based on: https://stackoverflow.com/a/52012658/1510542
# Credits to @zephyrus, @LukeDeLuccia, and @xdurch0
def gaussian_kernel(size, mean, std):
    """Return a unit-sum 1-D Gaussian kernel shaped [2*size+1, 1, 1] for tf.nn.conv1d."""
    mu = tf.cast(mean, tf.float32)
    sigma = tf.cast(std, tf.float32)
    normal = tf.distributions.Normal(mu, sigma)
    # Evaluate the density on the integer grid [-size, ..., +size].
    grid = tf.range(start=-size, limit=size + 1, dtype=tf.float32)
    density = normal.prob(grid)
    # Add the in/out channel axes that conv1d expects, then normalise.
    weights = density[:, tf.newaxis, tf.newaxis]
    return weights / tf.reduce_sum(weights)
def gaussian_filter(input, sigma):
    """Smooth each row of a [batch, length] tensor with a Gaussian of width sigma."""
    # Truncate the kernel at roughly 4 standard deviations (SciPy's default).
    half_width = int(4 * sigma + 0.5)
    kernel = gaussian_kernel(size=half_width, mean=0.0, std=sigma)
    # conv1d wants [batch, length, channels]; treat the signal as one channel.
    signal = input[:, :, tf.newaxis]
    return tf.nn.conv1d(signal, kernel, stride=1, padding="SAME")
def run_trainer():
    """Train the autoencoder with a loss computed on Gaussian-smoothed signals.

    Same model as in the question, but the smoothing is done with the
    TensorFlow-native ``gaussian_filter`` above, so it is part of the graph
    and differentiable (this avoids the SciPy ``invalid axis`` error).
    """
    tf.reset_default_graph()
    # Define size of data, batch sizes
    N_data = 1000
    noise = 0.2 * (np.random.rand(N_data) - 0.5)
    x = np.linspace(0, 2*np.pi, N_data)
    y = np.sin(x)
    y_noisy = (y + noise).reshape((1, -1))  # shape (1, N_data): a single-sample batch
    input = tf.placeholder(tf.float32, shape=[None, N_data])
    # Encoder (1000 -> 500 -> 250) followed by decoder (250 -> 500 -> 1000).
    fc1 = fully_connected(inputs=input, num_outputs=500, activation_fn=tf.nn.relu)
    fc2 = fully_connected(inputs=fc1, num_outputs=250, activation_fn=tf.nn.relu)
    fc3 = fully_connected(inputs=fc2, num_outputs=500, activation_fn=tf.nn.relu)
    output = fully_connected(inputs=fc3, num_outputs=1000, activation_fn=tf.nn.tanh)
    # Smooth both sides with the in-graph filter; the loss then compares the
    # smoothed input against the smoothed reconstruction.
    smooth_input = gaussian_filter(input, sigma=1)
    smooth_output = gaussian_filter(output, sigma=1)
    loss = tf.reduce_mean(tf.square(smooth_input - smooth_output))
    optimiser = tf.train.AdamOptimizer(2e-5)
    train_step = tf.contrib.training.create_train_op(loss, optimiser)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(int(1e4)):
            _, loss_train = sess.run(
                [train_step, loss],
                feed_dict={input: y_noisy}
            )
            if i % 100 == 0:
                print("step: %i \t loss: %.3e" % (i, loss_train))
        # Fetch both the raw reconstruction and its smoothed version for plotting.
        result, smooth_result = sess.run(
            [output, smooth_output],
            feed_dict={input: y_noisy}
        )
    # Overlay noisy target, reconstruction, and smoothed reconstruction.
    plt.plot(x, y_noisy.flatten())
    plt.plot(x, result.flatten())
    plt.plot(x, smooth_result.flatten())
    plt.show()


if __name__ == "__main__":
    run_trainer()