Environment: Windows 10, Python 3.6.8, tensorflow-gpu 1.13.1
I have a model based on tf.estimator.DNNRegressor that has already been trained locally. Now I'm stuck getting the predict_input_fn function to work with live data. I got it working by reading a CSV file containing a single entry, but what I really want is to pass in a numpy array (or some other in-memory structure) and have the model make a prediction from that.
Here is a code snippet:
tf.enable_eager_execution()

model = tf.estimator.DNNRegressor(
    hidden_units=[128, 128],
    feature_columns=feature_columns,
    model_dir=model_dir)

def predict_input_fn():
    test_array = np.array([-0.057, -0.0569])  # shortened, normally 56 floats
    defaults = [tf.float32] * 56
    filenames = ['./prediction.csv']
    dataset_old = tf.data.experimental.CsvDataset(  # this works
        filenames=filenames,
        record_defaults=defaults)
    dataset = tf.data.Dataset.from_tensor_slices(test_array)  # this doesn't :(
    dataset = dataset.batch(1)
    features = tf.data.experimental.get_single_element(dataset)
    return dict(zip(feature_names, features)), None

predictor = model.predict(input_fn=predict_input_fn)
print(next(predictor))
Even with eager execution enabled, I get the following error:
TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn.
The data I train on has 56 float32 values as features and 1 float32 value as the label. As mentioned, this works fine when using the tf.data.experimental.CsvDataset() function, but for the life of me I can't figure out how to feed in a plain array.
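To make the target concrete, this is the kind of structure I understand the Estimator wants back from an input_fn: a dict with one small float32 tensor per feature name, plus a label that can be None when predicting. A minimal standalone sketch with made-up values (not taken from my real script):

import numpy as np
import tensorflow as tf

tf.enable_eager_execution()

feature_names = ["jkey" + str(i) for i in range(56)]
row = np.random.uniform(-0.1, 0.1, 56).astype(np.float32)  # stand-in for one CSV row

# One (1,)-shaped tensor per feature, keyed by column name; label left as None.
features = {name: tf.constant([value]) for name, value in zip(feature_names, row)}
label = None

print(len(features), features["jkey0"])  # 56 tf.Tensor([...], shape=(1,), dtype=float32)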
I've tried wrapping the array in a containing array, using the standard make_one_shot_iterator() function on the dataset, and building a tensor with tf.constant() before passing it to from_tensor_slices. I've been through the docs and every example I could find, and it still doesn't work.
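Roughly, those variants looked like this (reconstructed from memory, reusing the imports and feature_names from the full code below, so the exact calls may not match what I last ran):

test_array = np.array([[-0.057, -0.0569]])  # shortened, really 1 x 56 floats

# Variant A: build a constant first, then slice it
test_tensor = tf.constant(test_array)                      # shape (1, 56)
dataset = tf.data.Dataset.from_tensor_slices(test_tensor)  # elements of shape (56,)
dataset = dataset.batch(1)
features = tf.data.experimental.get_single_element(dataset)

# Variant B: pull the element with an explicit iterator instead of get_single_element
features = dataset.make_one_shot_iterator().get_next()     # still one (1, 56) tensor

# Either way I end up with a single (1, 56) tensor rather than 56 per-column tensors,
# so my guess is that dict(zip(feature_names, features)) is what hits the
# "only iterable when eager execution is enabled" error.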
I'm pretty stuck, so any help would be much appreciated.
Full code below:
import tensorflow as tf
# import tensorflowjs as tfjs
import json
import numpy as np

tf.enable_eager_execution()

model_dir = './dnn3/'

feature_columns = []
feature_names = []
feature_spec = {}
for i in range(56):
    feature_columns.append(tf.feature_column.numeric_column(key="jkey"+str(i)))
    feature_names.append("jkey"+str(i))
    feature_spec["jkey"+str(i)] = tf.FixedLenFeature([1], tf.float32)

def serving_input_receiver_fn():
    serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensors')
    receiver_tensors = {'inputs': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

def train_input_fn():
    defaults = [tf.float32] * 57
    filenames = ['./training.csv']  # can add to this array
    dataset = tf.data.experimental.CsvDataset(
        filenames=filenames,
        record_defaults=defaults)
    dataset = dataset.shuffle(1000000)
    dataset = dataset.repeat(1000)  # epochs
    dataset = dataset.batch(128)
    iter = dataset.make_one_shot_iterator()
    next = iter.get_next()
    features = next[:-1]
    label = next[-1]
    return dict(zip(feature_names, features)), label

def eval_input_fn():
    defaults = [tf.float32] * 57
    filenames = ['./evaluation.csv']
    dataset = tf.data.experimental.CsvDataset(
        filenames=filenames,
        record_defaults=defaults)
    dataset = dataset.shuffle(10000)
    dataset = dataset.repeat(100)
    dataset = dataset.batch(100)
    iter = dataset.make_one_shot_iterator()
    next = iter.get_next()
    features = next[:-1]
    label = next[-1]
    return dict(zip(feature_names, features)), label

def predict_input_fn():
    test_array = [[-0.057,-0.0569,-0.0569,-0.0759,-0.0568,-0.0379,-0.0379,-0.0567,-0.0946,0.0378,0.0566,0.0755,0,0,0,0,-0.0189,0,0,0,0,0.0379,0.0378,0.0568,0,0.0378,0,0.0758,0.0379,0.0379,0.057,0.0758,0.0379,0.0381,-0.152,0.2089,-0.0952,0.0568,-0.2485,.4667,-0.0803,-0.0775,-0.0832,-0.054,-0.0989,0.0063,0.0037,-0.0342,0.0007,0.0281,0.0187,-0.0065,0.0423,0.078,-0.0285,-0.0093],[-0.057,-0.0569,-0.0569,-0.0759,-0.0568,-0.0379,-0.0379,-0.0567,-0.0946,0.0378,0.0566,0.0755,0,0,0,0,-0.0189,0,0,0,0,0.0379,0.0378,0.0568,0,0.0378,0,0.0758,0.0379,0.0379,0.057,0.0758,0.0379,0.0381,-0.152,0.2089,-0.0952,0.0568,-0.2485,.4667,-0.0803,-0.0775,-0.0832,-0.054,-0.0989,0.0063,0.0037,-0.0342,0.0007,0.0281,0.0187,-0.0065,0.0423,0.078,-0.0285,-0.0093]]
    defaults = [tf.float32] * 56
    filenames = ['./prediction.csv']
    dataset2 = tf.data.experimental.CsvDataset(
        filenames=filenames,
        record_defaults=defaults)
    test_tensor = tf.constant(test_array)
    #test_dict = dict(zip(feature_names, test_array))
    dataset = tf.data.Dataset.from_tensor_slices(test_tensor)
    dataset = dataset.batch(1)
    features = tf.data.experimental.get_single_element(dataset)
    return dict(zip(feature_names, features)), None

model = tf.estimator.DNNRegressor(
    hidden_units=[128, 128],
    feature_columns=feature_columns,
    model_dir=model_dir)

def train():
    for i in range(5):
        model.train(input_fn=train_input_fn, steps=10000)
        eval_result = model.evaluate(input_fn=eval_input_fn)
        print(eval_result)

def train_eval():
    train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
    eval_spec = tf.estimator.EvalSpec(eval_input_fn)
    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)

def save():
    save_dir = model.export_savedmodel(export_dir_base=model_dir+'saved', serving_input_receiver_fn=serving_input_receiver_fn)
    print('Saved to ', save_dir)

def predict():
    predictor = model.predict(input_fn=predict_input_fn)
    print(next(predictor))

predict()
# tfjs.converters.save_keras_model(model, './tfjs/')
And the full error output:
WARNING:tensorflow:From C:\Users\jimba\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
Traceback (most recent call last):
  File "train.py", line 93, in <module>
    predict()
  File "train.py", line 91, in predict
    print(next(predictor))
  File "C:\Users\jimba\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 609, in predict
    input_fn, model_fn_lib.ModeKeys.PREDICT)
  File "C:\Users\jimba\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 967, in _get_features_from_input_fn
    result = self._call_input_fn(input_fn, mode)
  File "C:\Users\jimba\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 1079, in _call_input_fn
    return input_fn(**kwargs)
  File "train.py", line 67, in predict_input_fn
    return dict(zip(feature_names, features)), None
  File "C:\Users\jimba\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 442, in __iter__
    "Tensor objects are only iterable when eager execution is "
TypeError: Tensor objects are only iterable when eager execution is enabled. To iterate over this tensor use tf.map_fn