与AWS ECS群集的连接正在超时

时间:2019-10-21 17:31:50

标签: amazon-web-services nginx amazon-ec2 amazon-ecs amazon-elb

我有一个<ModalGateway> {modalIsOpen ? ( <Modal onClose={this.toggleGallery}> <Carousel views={this.imgs} currentIndex={currentIndex} frameProps={{ autoSize: 'height' }} components={{ Footer: CustomFooter }} /> </Modal> ) : null} </ModalGateway> 集群在专用子网中的ECS之上运行。我的注册商中有一条EC2记录,将我的API域路由到面向公众的CNAME的{​​{1}}名称。我有一个DNS侧面汽车集装箱,充当自定义NLB API的反向代理。主机/容器端口侦听443,然后nginx容器通过桥接网络通过8080路由到我的API。负载平衡器侦听器正在监听443 / TCP,因为我希望通过nginx容器完成nginx终止。因此,我在NLB上有一个Go直通。我还附加了一个通过443 / tcp接受流量的目标组。此外,我已将安全组附加到群集,从而允许来自网络负载平衡器的所有443以上的流量。尽管如此,当我向API发出请求时,连接到负载均衡器的超时。我觉得我在这里排着鸭子,所以我不知道为什么会这样。我所有的目标群体健康检查都通过了。下面是我的配置,nginx conf,任务定义和错误输出。

TLS

tcp

Configuration:

resource "aws_lb" "myapi_api_lb" {
  name               = "my-api-lb"
  internal           = false
  load_balancer_type = "network"
  subnets            = ["${module.vpc.public_subnets}"]

  enable_deletion_protection = false

  tags = {
     Environment = "dev/demo"
   }
 }

resource "aws_security_group" "ecs_tasks" {
  name        = "ecs-tasks"
  description = "allow inbound access from the LB only"
  vpc_id      = "${module.vpc.vpc_id}"

  ingress {
    protocol        = "TCP"
    from_port       = 443
    to_port         = 443
    cidr_blocks     = ["10.10.1.0/24", "10.10.2.0/24", "10.10.3.0/24"]
  }

  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_lb_target_group" "myapi_api_tg" {
  name     = "myapi-api-tg"
  port     = 443
  protocol = "TCP"
  target_type = "instance"
  vpc_id   = "${module.vpc.vpc_id}"

  stickiness{
    enabled = false
    type = "lb_cookie"
  }
  health_check{
    interval = 10
    port     = 443
    protocol = "TCP"
    healthy_threshold = 2
    unhealthy_threshold = 2
  }
}

resource "aws_lb_listener" "myapi_listener" {
  load_balancer_arn = "${aws_lb.myapi_api_lb.id}"
  port              = "443"
  protocol          = "TCP"

  default_action {
    target_group_arn = "${aws_lb_target_group.myapi_tg.id}"
    type             = "forward"
  }
}

resource "aws_iam_role" "ecs-instance-role" {
    name                = "ecs-instance-role"
    path                = "/"
    assume_role_policy  = "${data.aws_iam_policy_document.ecs-instance-policy.json}"
}

data "aws_iam_policy_document" "ecs-instance-policy" {
    statement {
        actions = ["sts:AssumeRole"]

        principals {
            type        = "Service"
            identifiers = ["ec2.amazonaws.com"]
        }
    }
}

resource "aws_iam_role_policy_attachment" "ecs-instance-role-attachment" {
    role       = "${aws_iam_role.ecs-instance-role.name}"
    policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}

resource "aws_iam_instance_profile" "ecs-instance-profile" {
    name = "ecs-instance-profile"
    path = "/"
    role = "${aws_iam_role.ecs-instance-role.id}"
    provisioner "local-exec" {
      command = "sleep 10"
    }
}

resource "aws_launch_configuration" "ecs-launch-configuration" {
    image_id                    = "ami-123456"
    instance_type               = "t2.medium"
    iam_instance_profile        = "${aws_iam_instance_profile.ecs-instance-profile.id}"

    root_block_device {
      volume_type = "standard"
      volume_size = 25
      delete_on_termination = false
    }

    lifecycle {
      create_before_destroy = true
    }

    security_groups             = ["${aws_security_group.ecs_tasks.id}", "${module.ssh_security_group.this_security_group_id}"]
    associate_public_ip_address = "false"
    key_name                    = "myapiadmin"
    user_data                   = <<EOF
                                  #!/bin/bash
                                  sudo yum update -y ecs-init
                                  echo ECS_CLUSTER=${aws_ecs_cluster.myapi.name} >> /etc/ecs/ecs.config
                                  EOF
}

resource "aws_autoscaling_group" "ecs-autoscaling-group" {
    name                        = "ecs-autoscaling-group"
    max_size                    = 4
    min_size                    = 2
    desired_capacity            = 2
    vpc_zone_identifier         = ["${module.vpc.private_subnets}"]
    launch_configuration        = "${aws_launch_configuration.ecs-launch-configuration.name}"
    health_check_type           = "ELB"
  }

resource "aws_ecs_cluster" "myapi" {
  name = "mygoapis"
}

data "aws_iam_role" "ecs_task_execution_role" {
  name = "ecsTaskExecutionRole"
}

resource "aws_ecs_task_definition" "myapi" {
  family                   = "myapi"
  requires_compatibilities = ["EC2"]
  network_mode = "bridge"
  cpu = 512
  memory = 1024
  execution_role_arn = "${data.aws_iam_role.ecs_task_execution_role.arn}"

  container_definitions = "${file("myapi-service.json")}"
}

resource "aws_ecs_service" "myapiservice" {
  name            = "myapi_service"
  cluster         = "${aws_ecs_cluster.myapi.id}"
  task_definition = "${aws_ecs_task_definition.myapi.arn}"
  launch_type     = "EC2"
  desired_count   = 2

  load_balancer {
    target_group_arn = "${aws_lb_target_group.myapi_tg.id}"
    container_name   = "nginx"
    container_port   = 443
  }

    depends_on = [
    "aws_lb_listener.myapi_listener",
  ]
}

nginx.conf:

events { }
http {
    server {
        listen 80;
        listen [::]:80;     
        server_name www.myapi.com myapi.com;
        return 301 https://$host$request_uri;
    } 

    server {
        listen 443 ssl http2;
        listen [::]:443 ssl http2;  
        server_name www.myapi.com myapi.com;

        ssl_certificate      /etc/certbot/live/myapi.com/fullchain.pem;
        ssl_certificate_key  /etc/certbot/live/myapi.com/privkey.pem;

        ssl_dhparam /etc/ssl/ffdhe2048.pem;

        ssl_protocols TLSv1.3 TLSv1.2 TLSv1.1;
        ssl_prefer_server_ciphers on;
        ssl_ciphers EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA512:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:ECDH+AESGCM:ECDH+AES256:DH+AESGCM:DH+AES256:RSA+AESGCM:!aNULL:!eNULL:!LOW:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS;

        ssl_session_cache shared:TLS:2m;
        ssl_buffer_size 4k;

        ssl_stapling on;
        ssl_stapling_verify on;
        resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare

        add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always;

        location /api/ {
            proxy_pass http://myapi:8080/;

            # Reject requests with unsupported HTTP method
            if ($request_method !~ ^(GET|POST)$) {
                return 405;
            }
        }    
    }
}

Task Definition:

[  
  {
    "name": "nginx",
    "image": "12345678910.dkr.ecr.us-east-1.amazonaws.com/myapi-reverse-proxy:1.0",
    "memory": 256,
    "cpu": 256,
    "essential": true,
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
          "awslogs-group": "reverse_proxy_nginx",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "nginx"
      }
    },
    "portMappings": [
      {
        "hostPort": 443,
        "containerPort": 443
      }
    ],
    "links": [
      "myapi"
    ]
  }, 
  {
    "name": "myapi",
    "image": "12345678910.dkr.ecr.us-east-1.amazonaws.com/myapi:1.0",
    "cpu": 256,
    "memory": 256,
    "essential": true,
    "portMappings": [
      {
        "containerPort": 8080
      }
    ],
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
          "awslogs-group": "myapi",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "api"
      }
    }
  }
]

1 个答案:

答案 0 :(得分:0)

我找到了这个问题的完美解决方案。 Nginx的“ proxy_pass”不能使用“ etc / hosts”信息。 这使docker link选项无法在ECS中使用。

我建议您在ECS中使用HA-Proxy反向代理。 我尝试了nginx反向代理,但失败了。 HA-Proxy并获得成功。 它比nginx配置更简单。

首先,使用Docker的“链接”选项并设置“环境变量”(例如LINK_APP,LINK_PORT)。

第二,将此“环境变量”填充到haproxy.cfg中。

此外,我建议您使用“动态端口映射”到ALB。它使作品更加灵活。

taskdef.json:

# taskdef.json

{
    "executionRoleArn": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<APP_NAME>_ecsTaskExecutionRole",
    "containerDefinitions": [
      {
        "name": "<APP_NAME>-rp",
        "image": "gnokoheat/ecs-reverse-proxy:latest",
        "essential": true,
        "memoryReservation": <MEMORY_RESV>,
        "portMappings": [
          {
            "hostPort": 0,
            "containerPort": 80,
            "protocol": "tcp"
          }
        ],
        "links": [
          "<APP_NAME>"
        ],
        "environment": [
          {
            "name": "LINK_PORT",
            "value": "<SERVICE_PORT>"
          },
          {
            "name": "LINK_APP",
            "value": "<APP_NAME>"
          }
        ]
      },
      {
        "name": "<APP_NAME>",
        "image": "<IMAGE_NAME>",
        "essential": true,
        "memoryReservation": <MEMORY_RESV>,
        "portMappings": [
          {
            "protocol": "tcp",
            "containerPort": <SERVICE_PORT>
          }
        ],
        "environment": [
          {
            "name": "PORT",
            "value": "<SERVICE_PORT>"
          },
          {
            "name": "APP_NAME",
            "value": "<APP_NAME>"
          }
        ]
      }
    ],
    "requiresCompatibilities": [
      "EC2"
    ],
    "networkMode": "bridge",
    "family": "<APP_NAME>"
  }

haproxy.cfg:

# haproxy.cfg

global
    daemon
    pidfile /var/run/haproxy.pid

defaults
    log global
    mode http
    retries 3
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend http
    bind *:80

    http-request set-header X-Forwarded-Host %[req.hdr(Host)]

    compression algo gzip
    compression type text/css text/javascript text/plain application/json application/xml

    default_backend app

backend app
    server static "${LINK_APP}":"${LINK_PORT}"

Dockerfile(haproxy):

FROM haproxy:1.7
USER root
COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
  

请参阅:

     

Github:https://github.com/gnokoheat/ecs-reverse-proxy

     

Docker镜像:gnokoheat / ecs-reverse-proxy:latest