I am trying to stream AWS CloudWatch Logs to Elasticsearch through Kinesis Firehose, but the Terraform configuration below produces an error. Any suggestions?
resource "aws_s3_bucket" "bucket" {
bucket = "cw-kinesis-es-bucket"
acl = "private"
}
resource "aws_iam_role" "firehose_role" {
name = "firehose_test_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_elasticsearch_domain" "es" {
domain_name = "firehose-es-test"
elasticsearch_version = "1.5"
cluster_config {
instance_type = "t2.micro.elasticsearch"
}
ebs_options {
ebs_enabled = true
volume_size = 10
}
advanced_options {
"rest.action.multi.allow_explicit_index" = "true"
}
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "es:*",
"Principal": "*",
"Effect": "Allow",
"Condition": {
"IpAddress": {"aws:SourceIp": ["xxxxx"]}
}
}
]
}
CONFIG
snapshot_options {
automated_snapshot_start_hour = 23
}
tags {
Domain = "TestDomain"
}
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
name = "terraform-kinesis-firehose-test-stream"
destination = "elasticsearch"
s3_configuration {
role_arn = "${aws_iam_role.firehose_role.arn}"
bucket_arn = "${aws_s3_bucket.bucket.arn}"
buffer_size = 10
buffer_interval = 400
compression_format = "GZIP"
}
elasticsearch_configuration {
domain_arn = "${aws_elasticsearch_domain.es.arn}"
role_arn = "${aws_iam_role.firehose_role.arn}"
index_name = "test"
type_name = "test"
}
}
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_cloudwatch_log_subscription_filter" "test_kinesis_logfilter" {
name = "test_kinesis_logfilter"
role_arn = "${aws_iam_role.iam_for_lambda.arn}"
log_group_name = "loggorup.log"
filter_pattern = ""
destination_arn = "${aws_kinesis_firehose_delivery_stream.test_stream.arn}"
}
resource "aws_s3_bucket" "bucket" {
bucket = "cw-kinesis-es-bucket"
acl = "private"
}
resource "aws_iam_role" "firehose_role" {
name = "firehose_test_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_elasticsearch_domain" "es" {
domain_name = "firehose-es-test"
elasticsearch_version = "1.5"
cluster_config {
instance_type = "t2.micro.elasticsearch"
}
ebs_options {
ebs_enabled = true
volume_size = 10
}
advanced_options {
"rest.action.multi.allow_explicit_index" = "true"
}
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "es:*",
"Principal": "*",
"Effect": "Allow",
"Condition": {
"IpAddress": {"aws:SourceIp": ["xxxxx"]}
}
}
]
}
CONFIG
snapshot_options {
automated_snapshot_start_hour = 23
}
tags {
Domain = "TestDomain"
}
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
name = "terraform-kinesis-firehose-test-stream"
destination = "elasticsearch"
s3_configuration {
role_arn = "${aws_iam_role.firehose_role.arn}"
bucket_arn = "${aws_s3_bucket.bucket.arn}"
buffer_size = 10
buffer_interval = 400
compression_format = "GZIP"
}
elasticsearch_configuration {
domain_arn = "${aws_elasticsearch_domain.es.arn}"
role_arn = "${aws_iam_role.firehose_role.arn}"
index_name = "test"
type_name = "test"
}
}
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_cloudwatch_log_subscription_filter" "test_kinesis_logfilter" {
name = "test_kinesis_logfilter"
role_arn = "${aws_iam_role.iam_for_lambda.arn}"
log_group_name = "loggorup.log"
filter_pattern = ""
destination_arn = "${aws_kinesis_firehose_delivery_stream.test_stream.arn}"
}
Answer (score: 10)
In this configuration, you are directing Cloudwatch Logs to send log records to Kinesis Firehose, which in turn writes the data it receives to both S3 and ElasticSearch. Thus the AWS services you are using talk to each other as follows:
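Cloudwatch Logs --> Kinesis Firehose --> S3
                                     \-> ElasticSearch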
In order for one AWS service to talk to another, the first service must assume a role that grants it the necessary access. In IAM terminology, "assuming a role" means temporarily acting with the privileges granted to that role. An AWS IAM role has two key parts:

- The assume role policy, which controls which services and users may assume the role.
- The access policies, which control what the role grants access to once it has been assumed.
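As a minimal sketch of how those two parts map onto Terraform resources (the resource names and the bucket ARN here are illustrative, not from the question):

resource "aws_iam_role" "example" {
  name = "example_role"

  # Part 1: the assume role policy ("trust policy") controls
  # which service may assume this role.
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {"Service": "firehose.amazonaws.com"},
      "Effect": "Allow"
    }
  ]
}
EOF
}

# Part 2: the access policy controls what the role can do
# once it has been assumed.
resource "aws_iam_role_policy" "example" {
  role = "${aws_iam_role.example.name}"

  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:PutObject"],
      "Resource": ["arn:aws:s3:::example-bucket/*"]
    }
  ]
}
EOF
}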
Two separate roles are needed here. One grants Cloudwatch Logs permission to talk to Kinesis Firehose, and the second grants Kinesis Firehose access to talk to S3 and ElasticSearch.
For the rest of this answer, I will assume Terraform is running as a user with full administrative access to the AWS account. If that is not the case, you must first ensure that Terraform runs as an IAM principal that has permission to create roles and pass them to other services.
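A hedged sketch of the permissions such a principal might need for this configuration (the exact action list is an assumption; adjust it to your account's conventions):

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "iam:CreateRole",
        "iam:GetRole",
        "iam:DeleteRole",
        "iam:PutRolePolicy",
        "iam:GetRolePolicy",
        "iam:DeleteRolePolicy",
        "iam:PassRole"
      ],
      "Resource": "*"
    }
  ]
}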
In the example given in the question, the role_arn passed to aws_cloudwatch_log_subscription_filter refers to a role whose assume_role_policy is for AWS Lambda, so Cloudwatch Logs is not able to assume this role.

To fix this, the assume role policy can be changed to use the service name for Cloudwatch Logs:
resource "aws_iam_role" "cloudwatch_logs" {
name = "cloudwatch_logs_to_firehose"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "logs.us-east-1.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
The above allows the Cloudwatch Logs service to assume the role. Next, the role needs an access policy that allows it to write to the Firehose delivery stream:

resource "aws_iam_role_policy" "cloudwatch_logs" {
role = "${aws_iam_role.cloudwatch_logs.name}"
policy = <<EOF
{
"Statement": [
{
"Effect": "Allow",
"Action": ["firehose:*"],
"Resource": ["${aws_kinesis_firehose_delivery_stream.test_stream.arn}"]
}
]
}
EOF
}
The above grants the Cloudwatch Logs service access to call any Kinesis Firehose action, as long as it targets the specific delivery stream created by this Terraform configuration. This is more access than strictly necessary; for more information, see Actions and Condition Context Keys for Amazon Kinesis Firehose.
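If you prefer to scope this down, a minimal sketch of a narrower policy might grant only the standard Firehose write actions (the resource name cloudwatch_logs_minimal is illustrative):

resource "aws_iam_role_policy" "cloudwatch_logs_minimal" {
  role = "${aws_iam_role.cloudwatch_logs.name}"

  policy = <<EOF
{
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "firehose:PutRecord",
        "firehose:PutRecordBatch"
      ],
      "Resource": ["${aws_kinesis_firehose_delivery_stream.test_stream.arn}"]
    }
  ]
}
EOF
}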
To complete the wiring, the aws_cloudwatch_log_subscription_filter resource must be updated to reference this new role:
resource "aws_cloudwatch_log_subscription_filter" "test_kinesis_logfilter" {
name = "test_kinesis_logfilter"
role_arn = "${aws_iam_role.cloudwatch_logs.arn}"
log_group_name = "loggorup.log"
filter_pattern = ""
destination_arn = "${aws_kinesis_firehose_delivery_stream.test_stream.arn}"
# Wait until the role has required access before creating
depends_on = ["aws_iam_role_policy.cloudwatch_logs"]
}
Unfortunately, due to the internal design of AWS IAM, policy changes often take several minutes to propagate after Terraform submits them, so policy-related errors can occur when a new resource is created very soon after the policy it relies on. In that case it is usually sufficient to wait about ten minutes and run Terraform again, at which point it should resume where it left off and retry creating the resource. This is also why the resources here declare depends_on on the relevant IAM role policy: it ensures the policy exists before the dependent resource is created.

The example given in the question already has an IAM role with a suitable assume role policy for Kinesis Firehose:
resource "aws_iam_role" "firehose_role" {
name = "firehose_test_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
The above grants Kinesis Firehose access to assume this role. As before, the role also needs an access policy, this time granting it access to the target S3 bucket and ElasticSearch domain:
resource "aws_iam_role_policy" "firehose_role" {
role = "${aws_iam_role.firehose_role.name}"
policy = <<EOF
{
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:*"],
"Resource": ["${aws_s3_bucket.bucket.arn}"]
},
{
"Effect": "Allow",
"Action": ["es:ESHttpGet"],
"Resource": ["${aws_elasticsearch_domain.es.arn}/*"]
},
{
"Effect": "Allow",
"Action": [
"logs:PutLogEvents"
],
"Resource": [
"arn:aws:logs:*:*:log-group:*:log-stream:*"
]
}
]
}
EOF
}
The above policy allows Kinesis Firehose to take any action on the created S3 bucket, take any action on the created ElasticSearch domain, and write log events to any log stream in Cloudwatch Logs. The last part is not strictly necessary, but it matters if logging is enabled for the Firehose delivery stream, since otherwise Kinesis Firehose is unable to write error logs back to Cloudwatch Logs.

Again, this is more access than strictly necessary. For details on the specific actions each service supports, see the IAM action references in the user guides for Amazon S3, the Amazon ElasticSearch service, and Cloudwatch Logs.
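As an illustrative sketch of what a tighter policy could look like (the action lists here are assumptions based on the S3 and ElasticSearch operations Firehose typically performs; consult the references above for the authoritative lists):

resource "aws_iam_role_policy" "firehose_role_minimal" {
  role = "${aws_iam_role.firehose_role.name}"

  policy = <<EOF
{
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:AbortMultipartUpload",
        "s3:GetBucketLocation",
        "s3:ListBucket",
        "s3:PutObject"
      ],
      "Resource": [
        "${aws_s3_bucket.bucket.arn}",
        "${aws_s3_bucket.bucket.arn}/*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": [
        "es:ESHttpGet",
        "es:ESHttpPost",
        "es:ESHttpPut"
      ],
      "Resource": [
        "${aws_elasticsearch_domain.es.arn}",
        "${aws_elasticsearch_domain.es.arn}/*"
      ]
    }
  ]
}
EOF
}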
Since this single role has access to write to both S3 and ElasticSearch, it can be specified for both delivery configurations in the Kinesis Firehose delivery stream:
resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
name = "terraform-kinesis-firehose-test-stream"
destination = "elasticsearch"
s3_configuration {
role_arn = "${aws_iam_role.firehose_role.arn}"
bucket_arn = "${aws_s3_bucket.bucket.arn}"
buffer_size = 10
buffer_interval = 400
compression_format = "GZIP"
}
elasticsearch_configuration {
domain_arn = "${aws_elasticsearch_domain.es.arn}"
role_arn = "${aws_iam_role.firehose_role.arn}"
index_name = "test"
type_name = "test"
}
# Wait until access has been granted before creating the firehose
# delivery stream.
depends_on = ["aws_iam_role_policy.firehose_role"]
}
With all of the above wiring in place, the services have the access they need to connect up the parts of this delivery pipeline.

The same general pattern applies to any connection between two AWS services. The key piece of information needed in each case is the service name of the service that will initiate the requests, such as firehose.amazonaws.com or logs.us-east-1.amazonaws.com. These service names are unfortunately generally poorly documented and hard to find, but they can usually be located in the policy examples within each service's user guide.
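Putting it all together, every such case reduces to the same trust-policy template; a sketch, where SERVICE_NAME is a placeholder for the initiating service's name:

resource "aws_iam_role" "service_to_service" {
  name = "service-to-service-example"

  # SERVICE_NAME is a placeholder: substitute the initiating
  # service's name, e.g. firehose.amazonaws.com.
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "SERVICE_NAME"
      },
      "Effect": "Allow"
    }
  ]
}
EOF
}

Pair this with an aws_iam_role_policy granting the specific actions the initiating service needs on the target resources, as in the examples above.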