Saving a Tensorflow v1 model in a format compatible with the TensorFlow Lite v2 converter

Date: 2019-11-29 07:51:27

Tags: python tensorflow tensorflow2.0

I have a Tensorflow v1 model whose weights are loaded from a .npy file. The code for the model and the weight loading looks like this:

import numpy as np
import tensorflow as tf


class AlexNet(object):
    """Implementation of the AlexNet."""

    def __init__(self, x, keep_prob, num_classes, skip_layer,
                 weights_path='DEFAULT'):
        """Create the graph of the AlexNet model.
        Args:
            x: Placeholder for the input tensor.
            keep_prob: Dropout probability.
            num_classes: Number of classes in the dataset.
            skip_layer: List of names of the layers that get trained from
                scratch.
            weights_path: Complete path to the pretrained weight file, if it
                isn't in the same folder as this code
        """
        # Parse input arguments into class variables
        self.X = x
        self.NUM_CLASSES = num_classes
        self.KEEP_PROB = keep_prob
        self.SKIP_LAYER = skip_layer

        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = 'bvlc_alexnet.npy'
        else:
            self.WEIGHTS_PATH = weights_path

        # Call the create function to build the computational graph of AlexNet
        self.create()

    def create(self):
        """Create the network graph."""
        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
        conv1 = conv(self.X, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
        norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')
        pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')

        # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups
        conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
        norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')
        pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')

        # 3rd Layer: Conv (w ReLu)
        self.conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')
        self.flattened = tf.reshape(self.conv3, [1, 64896], name='output')

    def load_initial_weights(self, session):
        """Load weights from file into network.
        As the weights from http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
        come as a dict of lists (e.g. weights['conv1'] is a list) and not as
        dict of dicts (e.g. weights['conv1'] is a dict with keys 'weights' &
        'biases') we need a special load function
        """
        # Load the weights into memory
        # allow_pickle=True is required by newer NumPy versions, since the
        # .npy file stores a pickled dict rather than a plain array
        weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes',
                               allow_pickle=True).item()

        # Loop over all layer names stored in the weights dict
        for op_name in weights_dict:

            # Check if layer should be trained from scratch
            if op_name not in self.SKIP_LAYER:

                with tf.compat.v1.variable_scope(op_name, reuse=True):

                    # Assign weights/biases to their corresponding tf variable
                    for data in weights_dict[op_name]:

                        # Biases
                        if len(data.shape) == 1:
                            var = tf.compat.v1.get_variable('biases', trainable=False, use_resource=False)
                            session.run(var.assign(data))

                        # Weights
                        else:
                            var = tf.compat.v1.get_variable('weights', trainable=False, use_resource=False)
                            session.run(var.assign(data))


def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,
         padding='SAME', groups=1):
    """Create a convolution layer.
    Adapted from: https://github.com/ethereon/caffe-tensorflow
    """
    # Get number of input channels
    input_channels = int(x.get_shape()[-1])

    # Create lambda function for the convolution
    convolve = lambda i, k: tf.nn.conv2d(input=i, filters=k,
                                         strides=[1, stride_y, stride_x, 1],
                                         padding=padding)

    with tf.compat.v1.variable_scope(name) as scope:
        # Create tf variables for the weights and biases of the conv layer
        weights = tf.compat.v1.get_variable('weights',
                                            shape=[filter_height,
                                                   filter_width,
                                                   input_channels // groups,
                                                   num_filters],
                                            use_resource=False)
        biases = tf.compat.v1.get_variable('biases', shape=[num_filters],
                                           use_resource=False)

    if groups == 1:
        conv = convolve(x, weights)

    # In the case of multiple groups, split the input and weights and
    # convolve them separately
    else:
        input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
        weight_groups = tf.split(axis=3, num_or_size_splits=groups,
                                 value=weights)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]

        # Concat the convolved output together again
        conv = tf.concat(axis=3, values=output_groups)

    # Add biases
    bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(input=conv))

    # Apply relu function
    relu = tf.nn.relu(bias, name=scope.name)

    return relu


def fc(x, num_in, num_out, name, relu=True):
    """Create a fully connected layer."""
    with tf.compat.v1.variable_scope(name) as scope:

        # Create tf variables for the weights and biases
        weights = tf.compat.v1.get_variable('weights', shape=[num_in, num_out],
                                  trainable=True, use_resource=False)
        biases = tf.compat.v1.get_variable('biases', [num_out], trainable=True, use_resource=False)

        # Matrix multiply weights and inputs and add bias
        act = tf.compat.v1.nn.xw_plus_b(x, weights, biases, name=scope.name)

    if relu:
        # Apply ReLu non linearity
        relu = tf.nn.relu(act)
        return relu
    else:
        return act


def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,
             padding='SAME'):
    """Create a max pooling layer."""
    return tf.nn.max_pool2d(input=x, ksize=[1, filter_height, filter_width, 1],
                            strides=[1, stride_y, stride_x, 1],
                            padding=padding, name=name)


def lrn(x, radius, alpha, beta, name, bias=1.0):
    """Create a local response normalization layer."""
    return tf.nn.local_response_normalization(x, depth_radius=radius,
                                              alpha=alpha, beta=beta,
                                              bias=bias, name=name)


def dropout(x, keep_prob):
    """Create a dropout layer."""
    return tf.nn.dropout(x, rate=1 - keep_prob)

g = tf.Graph()
with g.as_default():
    # Initialize all variables
    x = tf.compat.v1.placeholder(tf.float32, [1, 227, 227, 3])
    y = tf.compat.v1.placeholder(tf.float32, [1, 1000])
    keep_prob = tf.compat.v1.placeholder(tf.float32)
    alex_net = AlexNet(x, keep_prob, 1000, ["conv4", "conv5", "pool5", "fc6", "fc7", "fc8"])

    output = alex_net.flattened

    saver = tf.compat.v1.train.Saver()
    sess = tf.compat.v1.Session()
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    # Load the pretrained weights into the non-trainable layer
    alex_net.load_initial_weights(sess)
    graph_def = g.as_graph_def()

As you can see, I used the conversion script provided by TensorFlow to migrate the v1 model code to v2, although it mostly relies on the compat.v1 mode. In any case, I want to save this model with these weights in a format that the new TensorFlow Lite converter can turn into a .tflite file. I know the converter expects a Keras model (h5), a concrete function (not applicable here, or am I wrong?), or a SavedModel. So the question is: how do I save this model, or this graph definition, as a SavedModel compatible with the TensorFlow Lite v2 converter? Do I have to make any changes to the model?
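For reference, the route I have been experimenting with looks roughly like the minimal sketch below (untested; export_dir and the 'input'/'output' signature keys are placeholder names I picked, not part of the code above). It exports the live session as a v1-style SavedModel and then points the TF2 TFLite converter at that directory:

# Minimal sketch (assumption, not a verified solution): export the session
# that already holds the loaded weights as a v1-style SavedModel
export_dir = './alexnet_saved_model'  # hypothetical output path

with g.as_default():
    tf.compat.v1.saved_model.simple_save(
        sess,                         # session with the pretrained weights
        export_dir,
        inputs={'input': x},          # the input placeholder defined above
        outputs={'output': output})   # alex_net.flattened

# The TF2 TFLite converter accepts a SavedModel directory directly
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
tflite_model = converter.convert()
with open('alexnet.tflite', 'wb') as f:
    f.write(tflite_model)

Note that keep_prob is deliberately left out of the signature, since the exported output (the flattened conv3) never passes through the dropout layer. Whether from_saved_model in TF 2.0 accepts such a v1-style SavedModel as-is, or whether the graph first has to be wrapped into a concrete function (e.g. via tf.compat.v1.wrap_function), is exactly the part I am unsure about.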

0 Answers:

There are no answers yet.