I am trying to build a model that uses sampled_softmax_loss, and I can't seem to get the input tensors shaped correctly for that function. Here is an example that, as far as I can tell, matches the documentation, yet it throws this exception:
ValueError: Shape must be rank 2 but is rank 1 for 'sampled_softmax_loss/LogUniformCandidateSampler' (op: 'LogUniformCandidateSampler') with input shapes: [?].
Code:
import tensorflow as tf
import numpy as np
f1 = np.random.randint(low = 0, high = 4,size = 100)
labels = np.random.randint(low = 0, high = 5,size = 100)
f1_t = tf.feature_column.categorical_column_with_vocabulary_list('f1', vocabulary_list = [0,1,2,3])
base_columns = [f1_t]
feat_dict = {'f1' : f1}
def my_model_fn(features, labels, mode, params):
    logits = tf.feature_column.linear_model(features, base_columns, units=params["n_classes"])
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'probabilities': tf.nn.softmax(logits),
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    weights = [v for v in tf.global_variables() if v.name == 'linear_model/f1/weights:0'][0]
    biases = [v for v in tf.global_variables() if v.name == 'linear_model/bias_weights:0'][0]
    if mode == "train":
        loss = tf.nn.sampled_softmax_loss(
            weights=tf.transpose(weights),
            biases=biases,
            labels=labels,
            inputs=logits,
            num_classes=5,
            num_sampled=11,
            num_true=1,
            partition_strategy="div")
    elif mode == "eval":
        None
        # implement later
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode,
            loss=loss,
        )
    optimizer = tf.train.FtrlOptimizer(learning_rate=.1, l2_regularization_strength=0.1)  # AdagradOptimizer(0.001)
    train_op = optimizer.minimize(
        loss,
        global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(
        mode,
        loss=loss,
        train_op=train_op)
classifier = tf.estimator.Estimator(
    model_fn=my_model_fn,
    params={
        "feature_columns": base_columns,
        "n_classes": 5
    })
classifier.train(
    input_fn=tf.estimator.inputs.numpy_input_fn(feat_dict,
                                                labels,
                                                batch_size=3,
                                                num_epochs=2,
                                                shuffle=True))
If anyone can give me some pointers, I will forever owe you a virtual beer.
Answer 0: (score: 0)
The labels should have a [batch, one_hot] shape, e.g. [100, 6].
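In other words, the candidate sampler is complaining because tf.nn.sampled_softmax_loss expects a rank-2 labels tensor of shape [batch_size, num_true] (dtype int64), while numpy_input_fn delivers a rank-1 batch of class indices. A minimal sketch of one way to adjust the call inside my_model_fn, assuming the labels stay as integer class indices rather than one-hot vectors (the reduced num_sampled is also an assumption, since the default sampler draws unique candidates from only num_classes=5 classes):

# `labels` arrives from numpy_input_fn as a rank-1 int tensor of shape [batch_size];
# sampled_softmax_loss expects rank 2: [batch_size, num_true], dtype int64.
sampled_labels = tf.expand_dims(tf.cast(labels, tf.int64), axis=-1)  # [batch_size, 1]

loss = tf.nn.sampled_softmax_loss(
    weights=tf.transpose(weights),
    biases=biases,
    labels=sampled_labels,   # rank-2 labels instead of the raw rank-1 tensor
    inputs=logits,
    num_classes=5,
    num_sampled=4,           # kept below num_classes because candidates are sampled uniquely
    num_true=1,
    partition_strategy="div")

With the labels expanded to rank 2, the LogUniformCandidateSampler shape check that raised the ValueError above should pass; whether the weights/inputs shapes then line up is a separate question worth verifying against the sampled_softmax_loss documentation.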