So I've created the autoscaling group with the following Terraform files:

Autoscaling group:
resource "aws_autoscaling_group" "orbix-mvp" {
desired_capacity = 1
launch_configuration = "${aws_launch_configuration.orbix-mvp.id}"
max_size = 1
min_size = 1
name = "${var.project}-${var.stage}"
vpc_zone_identifier = ["${aws_subnet.orbix-mvp.*.id}"]
tag {
key = "Name"
value = "${var.project}-${var.stage}"
propagate_at_launch = true
}
tag {
key = "kubernetes.io/cluster/${var.project}-${var.stage}"
value = "owned"
propagate_at_launch = true
}
}
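For context, the subnets behind vpc_zone_identifier follow the EKS getting-started layout. A sketch of them (the count and CIDR blocks here are placeholders, not my real values):

# Sketch of the subnets referenced by vpc_zone_identifier above;
# count and cidr_block are placeholders for the real values.
data "aws_availability_zones" "available" {}

resource "aws_subnet" "orbix-mvp" {
  count = 2

  availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
  cidr_block        = "10.0.${count.index}.0/24"
  vpc_id            = "${aws_vpc.orbix-mvp.id}"

  tags = "${
    map(
     "Name", "${var.project}-${var.stage}",
     "kubernetes.io/cluster/${var.project}-${var.stage}", "shared",
    )
  }"
}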
Launch configuration:
# This data source is included for ease of sample architecture deployment
# and can be swapped out as necessary.
data "aws_region" "current" {}

# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
locals {
  orbix-mvp-node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.orbix-mvp.endpoint}' --b64-cluster-ca '${aws_eks_cluster.orbix-mvp.certificate_authority.0.data}' '${var.project}-${var.stage}'
USERDATA
}

resource "aws_launch_configuration" "orbix-mvp" {
  associate_public_ip_address = true
  iam_instance_profile        = "${aws_iam_instance_profile.orbix-mvp-node.name}"
  image_id                    = "${data.aws_ami.eks-worker.id}"
  instance_type               = "c5.large"
  name_prefix                 = "${var.project}-${var.stage}"
  security_groups             = ["${aws_security_group.orbix-mvp-node.id}"]
  user_data_base64            = "${base64encode(local.orbix-mvp-node-userdata)}"
  key_name                    = "devops"

  lifecycle {
    create_before_destroy = true
  }
}
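The data.aws_ami.eks-worker lookup referenced by image_id isn't shown above; it follows the EKS getting-started guide, roughly:

# Sketch of the AMI lookup used for image_id above, per the EKS
# getting-started guide: pick the newest Amazon-published EKS worker AMI.
data "aws_ami" "eks-worker" {
  filter {
    name   = "name"
    values = ["amazon-eks-node-*"]
  }

  most_recent = true
  owners      = ["602401143452"] # Amazon EKS AMI account ID
}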
So I've added the already-generated SSH key named devops to the launch configuration. I can use that key to SSH into manually created EC2 instances, but I can't seem to SSH into the instances launched by this configuration.
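For reference, the devops key pair already exists in my AWS account; if it were managed in Terraform, it would look roughly like this (the public-key path is a placeholder):

# Sketch only: how the "devops" key pair would be registered in
# Terraform; the public-key path is a placeholder.
resource "aws_key_pair" "devops" {
  key_name   = "devops"
  public_key = "${file("~/.ssh/devops.pub")}"
}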
Any help is appreciated, thanks :)
EDIT:

Node security group Terraform:
resource "aws_security_group" "orbix-mvp-node" {
name = "${var.project}-${var.stage}-node"
description = "Security group for all nodes in the ${var.project}-${var.stage} cluster"
vpc_id = "${aws_vpc.orbix-mvp.id}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = "${
map(
"Name", "${var.project}-${var.stage}-node",
"kubernetes.io/cluster/${var.project}-${var.stage}", "owned",
)
}"
}
resource "aws_security_group_rule" "demo-node-ingress-self" {
description = "Allow node to communicate with each other"
from_port = 0
protocol = "-1"
security_group_id = "${aws_security_group.orbix-mvp-node.id}"
source_security_group_id = "${aws_security_group.orbix-mvp-node.id}"
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "demo-node-ingress-cluster" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
protocol = "tcp"
security_group_id = "${aws_security_group.orbix-mvp-node.id}"
source_security_group_id = "${aws_security_group.orbix-mvp-cluster.id}"
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "demo-node-port-22" {
description = "Add SSH access"
from_port = 22
protocol = "tcp"
security_group_id = "${aws_security_group.orbix-mvp-node.id}"
cidr_blocks = ["0.0.0.0/0"]
to_port = 22
type = "ingress"
}