如何将 TensorFlow Keras LSTM 模型转换为 TPU 模型?

时间:2018-10-07 08:11:47

标签: python tensorflow

可能是什么问题?

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Input, LSTM, Dense
def create_model():
    """Build a small stacked-LSTM network.

    NOTE(review): relies on a module-level array ``X`` for the input
    shape — assumes X is (samples, timesteps, features); TODO confirm.

    Returns:
        The constructed (uncompiled) Sequential model.
    """
    # create a small LSTM network
    model = Sequential()
    model.add(LSTM(20, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
    model.add(LSTM(20, return_sequences=True))
    model.add(LSTM(10, return_sequences=True))
    model.add(Dropout(0.2))
    # Last LSTM collapses the sequence (return_sequences=False) before the
    # dense head.
    model.add(LSTM(4, return_sequences=False))
    model.add(Dense(4, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(1, kernel_initializer='uniform', activation='relu'))
    # BUG FIX: the original never returned the model, so callers
    # (e.g. ``model = create_model()``) received None.
    return model

import tensorflow as tf

# TF 1.x-style logging; verbose output helps debug the TPU conversion.
tf.logging.set_verbosity(tf.logging.DEBUG)

# NOTE(review): clear_session() here resets the *tf.keras* backend graph,
# but create_model() (above) builds its layers with the standalone
# ``keras`` package — two different graphs. That mismatch is the likely
# cause of the "Tensor ... is not an element of this graph" ValueError
# reported below; keras_to_tpu_model expects a tf.keras model — confirm.
tf.keras.backend.clear_session()
# NOTE(review): as written, create_model() has no return statement, so
# ``model`` is None here.
model=create_model()

# TPU_WORKER is not defined anywhere in this snippet; it must be set
# (e.g. 'grpc://<ip>:8470') before this call — NameError otherwise.
tpu_model = tf.contrib.tpu.keras_to_tpu_model(
    model,
    strategy=tf.contrib.tpu.TPUDistributionStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)))
  

警告:tensorflow:tpu_model(来自tensorflow.contrib.tpu.python.tpu.keras_support)是实验性的,可能随时更改或删除,而不会发出警告。

     

ValueError:提取参数不能解释为张量。(Tensor Tensor("lstm_13/kernel:0", shape=(79, 320), dtype=float32_ref) 不是该图的元素。)

1 个答案:

答案 0 :(得分:0)

First, change the backend to include a TPU using the notebook settings available in the Edit -> Notebook settings menu if it's not selected.

import tensorflow as tf
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout, Activation, Input, LSTM, Dense
import os
def create_model(X):
    """Build and compile a small stacked-LSTM network using tf.keras layers.

    Using ``tf.keras.layers.*`` (not standalone ``keras``) keeps every
    layer in the tf.keras graph, which keras_to_tpu_model requires.

    Args:
        X: training array whose shape supplies the input dimensions —
           assumes (samples, timesteps, features); TODO confirm.

    Returns:
        The compiled Sequential model.
    """
    # create a small LSTM network
    model = Sequential()
    model.add(tf.keras.layers.LSTM(20, input_shape=(X.shape[1],X.shape[2]),return_sequences=True))
    model.add(tf.keras.layers.LSTM(20, return_sequences=True))
    model.add(tf.keras.layers.LSTM(10, return_sequences=True))
    model.add(tf.keras.layers.Dropout(0.2))
    # Final LSTM drops the time axis before the dense head.
    model.add(tf.keras.layers.LSTM(4, return_sequences=False))
    model.add(tf.keras.layers.Dense(4, kernel_initializer='uniform', activation='relu'))
    model.add(tf.keras.layers.Dense(1, kernel_initializer='uniform', activation='relu'))
    # TPU conversion (TF 1.x) requires a TF-native optimizer such as
    # tf.train.RMSPropOptimizer rather than a Keras optimizer object.
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
                  metrics=['accuracy'])
    # BUG FIX: the original omitted the return, so ``model = create_model(X)``
    # below bound None and keras_to_tpu_model(None) would fail.
    return model
# This address identifies the TPU we'll use when configuring TensorFlow.
# NOTE(review): COLAB_TPU_ADDR only exists inside a Colab runtime with a
# TPU backend selected; elsewhere this raises KeyError.
tpu='grpc://' + os.environ['COLAB_TPU_ADDR']
tf.logging.set_verbosity(tf.logging.DEBUG)
# Reset the tf.keras backend graph before building the model in it.
tf.keras.backend.clear_session()
# NOTE(review): create_model(X) as written above has no return statement,
# so ``model`` is None here unless a ``return model`` is added.
model=create_model(X)
#Convert Keras model to TPU model
tpu_model = tf.contrib.tpu.keras_to_tpu_model(
    model,
    strategy=tf.contrib.tpu.TPUDistributionStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(tpu)))