Can a CNN be used on 1D data that are not images, for example a list of 9 real numbers representing function values? In this code, I use lists of 9 real numbers as training data. The task is to compute the numerical derivative at the 5th point with several different finite-difference formulas; each sample's label is the formula whose estimate comes closest to the true derivative. (A sketch of a 1D-CNN variant appears after the code.)
from keras.utils import np_utils
import numpy as np
import random
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
import tensorflow as tf
def calculation_of_labels(func, derf):
    # Finite-difference estimates of the derivative at the 5th grid point
    # (index 4), step h = 0.125: backward (b), forward (f) and central
    # stencils of increasing order.
    f2f = (func[5]-func[4])/(0.125)
    f2b = (func[4]-func[3])/(0.125)
    f3 = (func[5]-func[3])/(0.125*2)
    f4b = (2*func[5]+3*func[4]-6*func[3]+func[2])/(6*0.125)
    f4f = (-1*func[6]+6*func[5]-3*func[4]-2*func[3])/(6*0.125)
    f5 = (-1*func[6]+8*func[5]-8*func[3]+func[2])/(0.125*12)
    f6b = (-3*func[6]+30*func[5]+20*func[4]-60*func[3]+15*func[2]-2*func[1])/(0.125*60)
    f6f = (2*func[7]-15*func[6]+func[5]-20*func[4]-30*func[3]+3*func[2])/(0.125*60)
    f7 = (func[7]-9*func[6]+45*func[5]-45*func[3]+9*func[2]-1*func[1])/(0.125*60)
    f8f = (-3*func[8]+28*func[7]-126*func[6]+420*func[5]-105*func[4]-252*func[3]+42*func[2]-4*func[1])/(0.125*420)
    f8b = (4*func[7]-42*func[6]+252*func[5]+105*func[4]-420*func[3]+126*func[2]-28*func[1]+3*func[0])/(0.125*420)
    f9 = (-3*func[8]+32*func[7]-168*func[6]+672*func[5]-672*func[3]+168*func[2]-32*func[1]+3*func[0])/(0.125*840)
    myList = [f2b, f2f, f3, f4b, f4f, f5, f6b, f6f, f7, f8b, f8f, f9]
    # Label = index of the estimate closest to the true derivative derf.
    b = min(myList, key=lambda x: abs(x-derf))
    return myList.index(b)
funvalue = []
lab = []
for number in range(300000):
    # Random function: a sum of 4 sinusoids with random amplitude and phase,
    # sampled on 9 grid points in [0, 1] with spacing 0.125.
    function = 0
    fder = 0
    Ak = random.uniform(1, 5)
    xv = np.arange(0, 1.1, 0.125)
    for k in range(1, 5):
        phi = random.uniform(0, 1)
        function = function + (Ak/k)*np.sin(2*np.pi*k*xv + phi*2*np.pi)
        fder = fder + Ak*2*np.pi*np.cos(2*np.pi*k*xv + phi*2*np.pi)
    for j in range(9):
        function[j] = round(function[j], 3)
        fder[j] = round(fder[j], 3)
    funvalue.append(function)
    lab.append(calculation_of_labels(function, fder[4]))  # true derivative at the 5th point
funvalue = np.array(funvalue)
funfinal = np.reshape(funvalue, (300000, 9))
inputdata, Label = shuffle(funfinal, lab, random_state=2)
train_data = [inputdata, Label]
x = tf.placeholder(tf.float32, [None, 9])    # 9 function values per sample
y_ = tf.placeholder(tf.float32, [None, 12])  # one-hot label over the 12 formulas
(X, s) = (train_data[0], train_data[1])
X_train, X_test, y_train, y_test = train_test_split(X, s, test_size=0.2, random_state=4)
X_train = np.array(X_train).astype('float32')
X_test = np.array(X_test).astype('float32')
# Pass the class count explicitly so the one-hot width matches the 12-class placeholder.
Y_train = np_utils.to_categorical(y_train, 12)
Y_test = np_utils.to_categorical(y_test, 12)
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
learning_rate = 0.01
n_hidden_1 = 200  # 1st hidden layer width
n_hidden_2 = 200  # 2nd hidden layer width
n_hidden_3 = 200  # 3rd hidden layer width
n_input = 9       # 9 function values per sample
n_classes = 12    # 12 finite-difference formulas
def next_batch(index_receive, dat, labels):
    # Gather the rows of dat/labels selected by index_receive.
    dat_shuffle = [dat[i] for i in index_receive]
    labels_shuffle = [labels[i] for i in index_receive]
    return np.asarray(dat_shuffle), np.asarray(labels_shuffle)
# Initializing every weight to zero keeps all ReLU units identical (and their
# gradients zero), so the network cannot learn; use small random weights instead.
w1 = tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1))
b1 = tf.Variable(tf.zeros([n_hidden_1]))
w2 = tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1))
b2 = tf.Variable(tf.zeros([n_hidden_2]))
w3 = tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev=0.1))
b3 = tf.Variable(tf.zeros([n_hidden_3]))
wo = tf.Variable(tf.truncated_normal([n_hidden_3, n_classes], stddev=0.1))
bo = tf.Variable(tf.zeros([n_classes]))
def multilayer_perceptron(x):
    # Three ReLU hidden layers with dropout, plus a linear output layer.
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, w1), b1))
    layer_1 = tf.nn.dropout(layer_1, keep_prob)
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, w2), b2))
    layer_2 = tf.nn.dropout(layer_2, keep_prob)
    layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, w3), b3))
    layer_3 = tf.nn.dropout(layer_3, keep_prob)
    out_layer = tf.matmul(layer_3, wo) + bo  # logits; softmax is applied in the loss
    return out_layer
pred = multilayer_perceptron(x)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
batch_size = 5000
with tf.Session() as sess:
    sess.run(init)  # a single initialization run is enough
    for epoch in range(2000):
        # Reshuffle the training indices each epoch and sweep full batches.
        index = np.arange(len(X_train))
        np.random.shuffle(index)
        batch_count = len(X_train) // batch_size
        for j in range(batch_count):
            start = j * batch_size
            end_idx = start + batch_size
            swiped_index = index[start:end_idx]
            batch = next_batch(swiped_index, X_train, Y_train)
            optimizer.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        # Display logs per epoch step (dropout disabled for evaluation).
        if epoch % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g" % (epoch, train_accuracy))
print("Optimization Finished!")
index_rec=np.arange(0,60000)
np.random.shuffle(index_rec)
swiped=index_rec[0:5000]
batch1= next_batch(swiped,X_test,Y_test)
print("test accuracy %g"%accuracy.eval(feed_dict={x: batch1[0], y_: batch1[1], keep_prob: 1.0}))