terraform - No Container Instances were found in your cluster

Date: 2020-01-04 13:25:54

Tags: amazon-web-services terraform terraform-provider-aws

I am deploying ECS with Terraform.

terraform apply completes without errors, but when I open the ECS service's Events tab in the console, I see this error:

service nginx-ecs-service was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.

How can I fix this? What is missing from my Terraform files?

locals {
  name        = "myapp"
  environment = "prod"

  # This is the convention we use to know what belongs to each other
  ec2_resources_name = "${local.name}-${local.environment}"
}

resource "aws_iam_server_certificate" "lb_cert" {
  name              = "lb_cert"
  certificate_body  = "${file("./www.example.com/cert.pem")}"
  private_key       = "${file("./www.example.com/privkey.pem")}"
  certificate_chain = "${file("./www.example.com/chain.pem")}"
}


resource "aws_security_group" "bastion-sg" {
  name   = "bastion-security-group"
  vpc_id = "${module.vpc.vpc_id}"

  ingress {
    protocol    = "tcp"
    from_port   = 22
    to_port     = 22
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_instance" "bastion" {
  depends_on = ["aws_security_group.bastion-sg"]

  ami                         = "ami-0d5d9d301c853a04a"
  key_name                    = "myapp"
  instance_type               = "t2.micro"
  vpc_security_group_ids      = ["${aws_security_group.bastion-sg.id}"]
  associate_public_ip_address = true

  subnet_id = "${element(module.vpc.public_subnets, 0)}"

  tags = {
    Name = "bastion"
  }
}

# VPC Definition
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 2.0"

  name = "my-vpc"
  cidr = "10.1.0.0/16"

  azs = ["us-east-2a", "us-east-2b", "us-east-2c"]

  private_subnets = ["10.1.1.0/24", "10.1.2.0/24", "10.1.3.0/24"]
  public_subnets  = ["10.1.101.0/24", "10.1.102.0/24", "10.1.103.0/24"]

  single_nat_gateway   = true
  enable_nat_gateway   = true
  enable_vpn_gateway   = false
  enable_dns_hostnames = true

  public_subnet_tags = {
    Name = "public"
  }

  private_subnet_tags = {
    Name = "private"
  }

  public_route_table_tags = {
    Name = "public-RT"
  }

  private_route_table_tags = {
    Name = "private-RT"
  }

  tags = {
    Environment = local.environment
    Name        = local.name
  }
}

# ------------
resource "aws_ecs_cluster" "public-ecs-cluster" {
  name = "myapp-${local.environment}"

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_security_group" "ecs-vpc-secgroup" {
  name        = "ecs-vpc-secgroup"
  description = "ecs-vpc-secgroup"
  # vpc_id      = "vpc-b8daecde"
  vpc_id = "${module.vpc.vpc_id}"

  ingress {
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "ecs-security-group"
  }
}

resource "aws_lb" "nginx-ecs-alb" {
  name               = "nginx-ecs-alb"
  internal           = false
  load_balancer_type = "application"
  subnets            = module.vpc.public_subnets
  security_groups    = ["${aws_security_group.ecs-vpc-secgroup.id}"]
}

resource "aws_alb_target_group" "nginx-ecs-tg" {
  name     = "nginx-ecs-tg"
  port     = "80"
  protocol = "HTTP"
  vpc_id   = "${module.vpc.vpc_id}"

  health_check {
    healthy_threshold   = 3
    unhealthy_threshold = 10
    timeout             = 5
    interval            = 10
    path                = "/"
  }

  depends_on = ["aws_lb.nginx-ecs-alb"]
}

resource "aws_alb_listener" "alb_listener" {
  load_balancer_arn = "${aws_lb.nginx-ecs-alb.arn}"
  port              = "80"
  protocol          = "HTTP"

  default_action {
    target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
    type             = "forward"
  }
}

resource "aws_ecs_task_definition" "nginx-image" {
  family                = "nginx-server"
  network_mode          = "bridge"
  container_definitions = <<DEFINITION
    [
      {
        "name": "nginx-web",
        "image": "nginx:latest",
        "essential": true,
        "portMappings": [
          {
            "containerPort": 80,
            "hostPort": 0,
            "protocol": "tcp"
          }
        ],
        "memory": 128,
        "cpu": 10
      }
    ]
    DEFINITION
}

data "aws_ecs_task_definition" "nginx-image" {
  depends_on      = ["aws_ecs_task_definition.nginx-image"]
  task_definition = "${aws_ecs_task_definition.nginx-image.family}"
}


resource "aws_launch_configuration" "ecs-launch-configuration" {
  name                 = "ecs-launch-configuration"
  image_id             = "ami-0d5d9d301c853a04a"
  instance_type        = "t2.micro"
  iam_instance_profile = "ecsInstanceRole"

  root_block_device {
    volume_type           = "standard"
    volume_size           = 35
    delete_on_termination = true
  }

  security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
  associate_public_ip_address = "true"
  key_name                    = "myapp"
  user_data                   = <<-EOF
                                      #!/bin/bash
                                      echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
                                    EOF
}

resource "aws_autoscaling_group" "ecs-autoscaling-group" {
  name             = "ecs-autoscaling-group"
  max_size         = "1"
  min_size         = "1"
  desired_capacity = "1"
  # vpc_zone_identifier  = ["subnet-5c66053a", "subnet-9cd1a2d4"]
  vpc_zone_identifier = module.vpc.public_subnets

  launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
  health_check_type    = "EC2"
  default_cooldown     = "300"


  lifecycle {
    create_before_destroy = true
  }

  tag {
    key                 = "Name"
    value               = "wizardet972_ecs-instance"
    propagate_at_launch = true
  }

  tag {
    key                 = "Owner"
    value               = "Wizardnet972"
    propagate_at_launch = true
  }

}


resource "aws_autoscaling_policy" "ecs-scale" {
  name                      = "ecs-scale-policy"
  policy_type               = "TargetTrackingScaling"
  autoscaling_group_name    = "${aws_autoscaling_group.ecs-autoscaling-group.name}"
  estimated_instance_warmup = 60

  target_tracking_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ASGAverageCPUUtilization"
    }

    target_value = "70"
  }
}

resource "aws_ecs_service" "nginx-ecs-service" {
  name    = "nginx-ecs-service"
  cluster = "${aws_ecs_cluster.public-ecs-cluster.id}"
  task_definition = "${aws_ecs_task_definition.nginx-image.family}:${max("${aws_ecs_task_definition.nginx-image.revision}", "${aws_ecs_task_definition.nginx-image.revision}")}"

  launch_type = "EC2"

  desired_count = 1

  load_balancer {
    target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
    container_name   = "nginx-web"
    container_port   = 80
  }

  depends_on = ["aws_ecs_task_definition.nginx-image"]
}

1 Answer:

Answer 0 (score: 1):

Update:

I tried to build the Terraform stack you shared and was able to reproduce the problem.

The issue is that the EC2 instance never became healthy, so the Auto Scaling group kept terminating it and launching a replacement.

The solution is to remove the configuration below. I believe the "standard" volume_type was causing the trouble:

root_block_device {
  volume_type = "standard"
  volume_size = 100
  delete_on_termination = true
}
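
With that block removed, the instances simply use the root volume defined by the AMI. A trimmed launch configuration might look like the sketch below (based on the question's resource; the AMI ID, key name, and instance profile are the question's values, not ones verified here):

resource "aws_launch_configuration" "ecs-launch-configuration" {
  name                        = "ecs-launch-configuration"
  image_id                    = "ami-0d5d9d301c853a04a"
  instance_type               = "t2.micro"
  iam_instance_profile        = "ecsInstanceRole"
  security_groups             = ["${aws_security_group.ecs-vpc-secgroup.id}"]
  associate_public_ip_address = true
  key_name                    = "myapp"

  # No root_block_device block: the AMI's default root volume is used.

  # Register the instance with the ECS cluster on boot.
  user_data = <<-EOF
                #!/bin/bash
                echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
              EOF
}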

Also check that you have completed the basic steps for preparing the EC2 instances: create them from an ECS-optimized AMI and attach the AmazonEC2ContainerServiceforEC2Role policy to the instance's IAM role.
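
For example, the AMI lookup and the instance role could be wired up roughly like this (a sketch, assuming Terraform 0.12+ and the Amazon Linux 2 ECS-optimized AMI published under AWS's public SSM parameter; the resource names are illustrative):

data "aws_ssm_parameter" "ecs_ami" {
  # Latest Amazon Linux 2 ECS-optimized AMI published by AWS.
  name = "/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id"
}

resource "aws_iam_role" "ecs_instance_role" {
  name = "ecsInstanceRole"

  # Allow EC2 instances to assume this role.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action    = "sts:AssumeRole"
      Effect    = "Allow"
      Principal = { Service = "ec2.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
  role       = aws_iam_role.ecs_instance_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}

resource "aws_iam_instance_profile" "ecs_instance_profile" {
  name = "ecsInstanceRole"
  role = aws_iam_role.ecs_instance_role.name
}

The launch configuration would then reference data.aws_ssm_parameter.ecs_ami.value for image_id and aws_iam_instance_profile.ecs_instance_profile.name for iam_instance_profile.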

References:

AWS ECS Error when running task: No Container Instances were found in your cluster

Setting up the instance role - http://blog.shippable.com/setup-a-container-cluster-on-aws-with-terraform-part-2-provision-a-cluster

Hope this helps, good luck.