Is it possible to train a NN to approximately recognize primes with this model?

Time: 2017-08-27 16:03:14

Tags: python algorithm tensorflow neural-network

I'm new to machine learning. I tried running the Hello World example at https://www.tensorflow.org/get_started/mnist/beginners:

  • Gradient descent
  • y = softmax(Wx + b)
  • cost function = cross_entropy

It works.

Then I tried the next step with the following dataset:

  • X - range(0, 100000)
  • Y - [is_prime(x) for x in X]

(with the W and b dimensions adjusted accordingly)
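Concretely, a minimal sketch of the dataset I mean (the full code below encodes each x as its 32 binary digits via to_bits):

X = [[(n >> b) & 1 for b in range(32)] for n in range(100000)]
Y = [is_prime(n) for n in range(100000)]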

What I see is that W and b stay constant throughout training.

Question:

Is there a bug in my code, or in the whole mathematical approach?

Thanks

Here is the code (in case it's relevant):

#!/usr/bin/python

import tensorflow as tf
import numpy as np
import math

def is_prime(n):
    # trial division by odd numbers up to sqrt(n)
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2  # 2 is the only even prime
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))
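# quick sanity check: is_prime(2) -> True, is_prime(7) -> True,
# is_prime(9) -> False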

NUM_FEATURES = 32
NUM_LABELS = 2
DATA_SET_SIZE = 100000
BATCH_SIZE = 100
NUM_BATCHES = DATA_SET_SIZE / BATCH_SIZE
NUM_TEST_BATCHES = 1
NUM_TRAINING_BATCHES = NUM_BATCHES - NUM_TEST_BATCHES
NUM_TRAINING_CYCLES = NUM_TRAINING_BATCHES
NUM_TRAINING_DISPLAY_STEPS = 10
TRAINING_DISPLAY_STEP_SIZE = NUM_TRAINING_CYCLES / NUM_TRAINING_DISPLAY_STEPS
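# 100000 samples in batches of 100 -> 1000 batches; the last batch is held
# out from training (test_data() below uses two hand-picked examples instead)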

def convert_data(fvecs, labels):
    fvecs_np = np.matrix(fvecs).astype(np.float32)
    labels_np = np.array(labels).astype(dtype=np.uint8)

    # one-hot encode: label k becomes a row with a 1 in column k
    labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
    return fvecs_np, labels_onehot

def to_bits(x):
    # little-endian binary encoding: one 0/1 feature per bit of x
    return [(x >> b) & 1 for b in range(NUM_FEATURES)]


def training_data():
    #from tensorflow.examples.tutorials.mnist import input_data
    #mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    labels = []
    fvecs = []

    for x in range(DATA_SET_SIZE):
        fvecs.append(to_bits(x))
        labels.append(is_prime(x))

    return convert_data(fvecs, labels)

def test_data():
    return convert_data([to_bits(10), to_bits(15485867)], [False, True])

def variable_summaries(var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

def main():

    # Build graph:
    x = tf.placeholder(tf.float32, shape=[None, NUM_FEATURES])
    y_ = tf.placeholder(tf.float32, shape=[None, NUM_LABELS])

    with tf.name_scope('W'):
        W = tf.Variable(tf.zeros([NUM_FEATURES, NUM_LABELS]))
        variable_summaries(W)

    with tf.name_scope('b'):
        b = tf.Variable(tf.zeros([NUM_LABELS]))
        variable_summaries(b)

    with tf.name_scope("Wx_b"):
        y = tf.nn.softmax(tf.matmul(x, W) + b)

    with tf.name_scope("cross_entropy"):
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y), reduction_indices=[1]))

    with tf.name_scope("train"):
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    with tf.Session() as sess:

        merged_summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('train', sess.graph)
        tf.global_variables_initializer().run()
        avg_cost = 0

        vecs, labels = training_data()
        test_x, test_y = test_data()
        print "Training..."
        for batch in range(NUM_TRAINING_BATCHES):
            batch_start = batch * BATCH_SIZE
            batch_end = batch_start + BATCH_SIZE
            batch_x_data = vecs[batch_start:batch_end]
            batch_y_data = labels[batch_start:batch_end]

            _               = sess.run(train_step,         feed_dict={x: batch_x_data, y_: batch_y_data})
            summary         = sess.run(merged_summary_op,  feed_dict={x: batch_x_data, y_: batch_y_data})
            avg_cost        = sess.run(cross_entropy,      feed_dict={x: batch_x_data, y_: batch_y_data})
            test_cost       = sess.run(cross_entropy,      feed_dict={x: test_x, y_: test_y})

            train_writer.add_summary(summary, batch)

            if batch % TRAINING_DISPLAY_STEP_SIZE == 0:
                print "Iteration %04u: Cost = %.9f. Test cost: %.9f." % (batch, avg_cost, test_cost)
        print "Done."         

        print "Testing..."
        #vecs, labels = test_data()
        print(sess.run(y, {x: vecs}))

main()

1 Answer:

Answer 0 (score: 2):

I think this is impossible. A machine learning method just learns a function mapping x to f(w; x), with the loss then defined as loss(y, f(w; x)); here x is a vector representing the features of something. If a function of that form could tell us whether x is prime, the NN might find it. But I don't think such a function exists (one might exist, since no mathematician has proven that there is no such function).
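One more note on the model itself: y = softmax(Wx + b) is linear in the 32 bit-features, and primality is provably not linearly separable over those bits. For any weights w and bias b, score(3) + score(13) = score(15) + score(1), because the two pairs of bit-vectors sum to the same vector; yet 3 and 13 are prime while 1 and 15 are not, so no single linear layer can classify all four correctly. A minimal sketch checking that bit identity (bits is a hypothetical helper mirroring to_bits from the question):

import numpy as np

def bits(n, width=32):
    # little-endian 0/1 encoding, mirroring to_bits in the question
    return np.array([(n >> b) & 1 for b in range(width)])

# 3 and 13 are prime; 1 and 15 are not. Their bit-vectors sum equally,
# so w.bits(3) + w.bits(13) == w.bits(15) + w.bits(1) for every w, b.
assert (bits(3) + bits(13) == bits(15) + bits(1)).all()

A deeper network with hidden units would not hit this particular limit, though whether it could approximate primality well in practice is another matter.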