Details below:
import os
import matplotlib.pyplot
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import sys
import numpy
(ds_train, ds_test), ds_info = tfds.load(
    "imdb_reviews",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
tokenizer=tfds.deprecated.text.Tokenizer()
def buildvoca():
    voca = set()
    for text, _ in ds_train:
        voca.update(tokenizer.tokenize(text.numpy().lower()))
    return voca
vocabulary=buildvoca()
encoder = tfds.deprecated.text.TokenTextEncoder(
    vocabulary, oov_token="<UNK>", lowercase=True, tokenizer=tokenizer
)
def myencoding(text_tensor, label):
    return encoder.encode(text_tensor.numpy()), label

def encodemap(text, label):
    encoded_text, label = tf.py_function(myencoding, inp=[text, label], Tout=(tf.int64, tf.int64))
    encoded_text.set_shape([None])
    label.set_shape([])
    return encoded_text, label
AUTOTUNE=tf.data.experimental.AUTOTUNE
ds_train=ds_train.map(encodemap,num_parallel_calls=AUTOTUNE)
ds_train=ds_train.cache()
ds_train=ds_train.shuffle(10000)
ds_train=ds_train.padded_batch(32,padded_shapes=([None],()))
ds_train=ds_train.prefetch(AUTOTUNE)
ds_test=ds_test.map(encodemap)
ds_test=ds_test.padded_batch(32,padded_shapes=([None],()))
model = keras.Sequential(
    [
        layers.Masking(mask_value=0),
        layers.Embedding(input_dim=len(vocabulary) + 2, output_dim=32),
        layers.GlobalAveragePooling1D(),
        layers.Dense(64, activation="relu"),
        layers.Dense(1),
    ]
)
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(lr=0.001),
    metrics=["accuracy"],
)
model.fit(ds_train,epochs=10,verbose=2)
model.evaluate(ds_test)
I get the following error:
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Received a label value of 1 which is outside the valid range of [0, 1). Label values: 1 1 0 0 1 0 0 1 1 0 1 0 1 1 0 1 1 0 1 0 0 1 1 1 0 1 0 1 1 1 1 1
[[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits (defined at \Desktop\modelsaving\model.py:58) ]] [Op:__inference_train_function_50876]
Function call stack:
train_function
Answer 0 (score: 0)
You are not using the right loss function. SparseCategoricalCrossentropy expects one logit per class, so with a single-unit output layer the only valid label is 0, which is why the batch of 0/1 labels triggers the "outside the valid range of [0, 1)" error.
Since this is a binary task with one output unit, just change the loss to binary cross-entropy:
model.compile(
    loss=keras.losses.BinaryCrossentropy(from_logits=True),  # use BCE
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    metrics=["accuracy"],
)