如何在Keras中实现具有多个输出的自定义层

时间:2020-01-10 13:51:12

标签: tensorflow keras deep-learning

我试图在Keras中实现具有多个输出的自定义层。

自定义层位于下面:

// Connection to socket sock already happened and we just received data!

// Convert the sender's address to a printable string.
// NOTE(review): source_addr is accessed via .sin6_family/.sin6_addr, so it is
// presumably declared as struct sockaddr_in6 (or sockaddr_storage) — confirm.
if (source_addr.sin6_family == PF_INET) 
{
    inet_ntoa_r(((struct sockaddr_in *)&source_addr)->sin_addr.s_addr, addr_str, sizeof(addr_str) - 1);
} 
else if (source_addr.sin6_family == PF_INET6) 
{
    inet6_ntoa_r(source_addr.sin6_addr, addr_str, sizeof(addr_str) - 1);
}

// Send the reply on the existing connection.
// NOTE(review): sizeof(brb_message) is the object size, not strlen() — this is
// correct only if brb_message is a fixed-size array meant to be sent whole; confirm.
int err = send(sock, brb_message, sizeof(brb_message), 0);
if (err < 0) 
{
    ESP_LOGE(TAG, "Error occurred during sending: errno %d", errno);
    break;  // assumes this fragment lives inside a loop — TODO confirm
}

shutdown(sock, 0);
closesocket(sock);

// Temporarily hop onto a different WiFi network to collect data.
esp_wifi_disconnect();
connect_to_new_wifi_and_do_stuff();

// get back into joint WiFi
esp_wifi_disconnect();
connect_to_old_wifi();

// send back collected data via TCP to the original sender
dest_addr.sin_addr.s_addr = inet_addr(addr_str);
dest_addr.sin_family = AF_INET;
dest_addr.sin_port = htons(TCP_PORT); 
addr_family = AF_INET;
ip_protocol = IPPROTO_IP;

int sock_back = socket(addr_family, SOCK_STREAM, ip_protocol);
if (sock_back < 0) 
{
    ESP_LOGE(TAG, "Unable to create socket: errno %d", errno);
}
else
{
    // BUG FIX: this log line originally sat between the if-body and the
    // `else`, detaching the `else` from its `if` (compile error). It belongs
    // on the success path, inside the else branch.
    ESP_LOGI(TAG, "Socket created, connecting to %s:%d", addr_str, TCP_PORT);

    // NOTE(review): this retries forever with no backoff or attempt limit if
    // the peer is unreachable — consider bounding the retry loop.
    err = connect(sock_back, (struct sockaddr *)&dest_addr, sizeof(dest_addr));
    while (err < 0) 
    {
        ESP_LOGE(TAG, "Socket unable to connect: errno %d", errno);
        err = connect(sock_back, (struct sockaddr *)&dest_addr, sizeof(dest_addr));
    }
    ESP_LOGI(TAG, "Successfully connected");

    err = send(sock_back, run_id, sizeof(run_id), 0);
    if (err < 0) 
    {
        ESP_LOGE(TAG, "Error occurred during sending to %s: errno %d", addr_str, errno);
        break;  // assumes this fragment lives inside a loop — TODO confirm
    }   

    // BUG FIX: sock_back was never closed, leaking a descriptor per pass.
    shutdown(sock_back, 0);
    closesocket(sock_back);
}

自定义层的代码如下:

class DFConv2D(Layer):
  """Deformable-style Conv2D layer with two outputs.

  Predicts per-pixel offsets with an internal Conv2D, samples the input at
  the offset positions via ``tf_batch_map_offsets``, and returns both the
  resampled feature map and the sampling coordinates:
  ``[x_offset, coords]``.
  """

  def __init__(self, **kwargs):
    super(DFConv2D, self).__init__(**kwargs)

  def build(self, input_shape):
    # Offset-predicting convolution; output channels match the input
    # channel count (input_shape[3], i.e. channels-last layout).
    self._conv2d = Conv2D(input_shape[3], (3, 3), padding='same')
    super(DFConv2D, self).build(input_shape)

  def call(self, x):
    # x is assumed channels-last: (batch, H, W, C) — TODO confirm.
    x_shape = x.get_shape()
    offsets = self._conv2d(x)
    ###################
    # Bring offsets to (batch, H, W, C) layout via a channels-first detour.
    x = tf.reshape(offsets, (-1, int(x_shape[3]), int(x_shape[1]), int(x_shape[2])))
    x = tf.transpose(x, [0, 2, 3, 1])
    ###################
    # Flatten channels into the batch dimension: (batch*C, H, W).
    x = tf.transpose(x, [0, 3, 1, 2])
    x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2])))
    ###################
    # Sample the input at the offset positions (external helper).
    x, coords = tf_batch_map_offsets(x, offsets)
    ####################
    # Restore (batch, H, W, C) layout.
    x = tf.reshape(x, (-1, int(x_shape[3]), int(x_shape[1]), int(x_shape[2])))
    x_offset = tf.transpose(x, [0, 2, 3, 1])
    ####################
    return [x_offset, coords]

  def compute_output_shape(self, input_shape):
    # BUG FIX: this method was indented inside call() after its return
    # statement, making it an unreachable nested function instead of a
    # class method. Keras therefore could not infer the shapes of this
    # multi-output layer. It must be defined at class level, as here.
    return [input_shape, (None, input_shape[1] * input_shape[2], 2)]

按如下方式在模型中使用该层时会出现错误:

# Network input: 28x28 single-channel images.
inputs = l = Input((28, 28, 1), name='input')
trainable = True

# conv11: plain convolution -> ReLU -> batch norm
l = Conv2D(32, (3, 3), name='conv11', padding='same', trainable=trainable)(l)
l = Activation('relu', name='conv11_relu')(l)
l = BatchNormalization(name='conv11_bn')(l)

# conv12: deformable offset layer feeding a strided convolution
l = DFConv2D(name='conv12_offset')(l)
# The custom layer yields two tensors: the resampled features and the
# sampling coordinates.
l_offset_1, l_coords_1 = l
l = Conv2D(64, (3, 3), name='conv12', padding='same', strides=(2, 2), trainable=trainable)(l_offset_1)
l = Activation('relu', name='conv12_relu')(l)
l = BatchNormalization(name='conv12_bn')(l)

我该如何解决这个问题?

0 个答案:

没有答案