Skip to content

Commit 54c613e

Browse files
committed
Support AWS Provider V5
1 parent e073e70 commit 54c613e

File tree

5 files changed

+46
-49
lines changed

5 files changed

+46
-49
lines changed

‎.github/workflows/release-branch.yml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,7 @@ on:
1010
- 'docs/**'
1111
- 'examples/**'
1212
- 'test/**'
13+
- 'README.*'
1314

1415
permissions:
1516
contents: write

‎.github/workflows/release-published.yml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -11,4 +11,4 @@ permissions:
1111

1212
jobs:
1313
terraform-module:
14-
uses: cloudposse/github-actions-workflows-terraform-module/.github/workflows/release.yml@main
14+
uses: cloudposse/github-actions-workflows-terraform-module/.github/workflows/release-published.yml@main

‎examples/complete/main.tf

Lines changed: 7 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -3,26 +3,22 @@ provider "aws" {
33
}
44

55
module "vpc" {
6-
source = "cloudposse/vpc/aws"
7-
version = "1.1.0"
8-
9-
ipv4_primary_cidr_block = "172.19.0.0/16"
10-
11-
context = module.this.context
6+
source = "cloudposse/vpc/aws"
7+
version = "2.1.0"
8+
ipv4_primary_cidr_block = var.vpc_cidr_block
9+
context = module.this.context
1210
}
1311

1412
module "subnets" {
15-
source = "cloudposse/dynamic-subnets/aws"
16-
version = "2.0.2"
17-
13+
source = "cloudposse/dynamic-subnets/aws"
14+
version = "2.3.0"
1815
availability_zones = var.availability_zones
1916
vpc_id = module.vpc.vpc_id
2017
igw_id = [module.vpc.igw_id]
2118
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
2219
nat_gateway_enabled = false
2320
nat_instance_enabled = false
24-
25-
context = module.this.context
21+
context = module.this.context
2622
}
2723

2824
module "s3_log_storage" {

‎main.tf

Lines changed: 31 additions & 31 deletions
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,7 @@
11
locals {
22
enabled = module.this.enabled
33

4-
aws_partition = join("", data.aws_partition.current.*.partition)
4+
aws_partition = join("", data.aws_partition.current[*].partition)
55

66
# This dummy bootstrap action is needed because of terraform bug https://github.com/terraform-providers/terraform-provider-aws/issues/12683
77
# When javax.jdo.option.ConnectionPassword is used in configuration_json then every plan will result in force recreation of EMR cluster.
@@ -151,7 +151,7 @@ resource "aws_security_group_rule" "managed_master_egress" {
151151
protocol = "-1"
152152
cidr_blocks = ["0.0.0.0/0"]
153153
ipv6_cidr_blocks = ["::/0"]
154-
security_group_id = join("", aws_security_group.managed_master.*.id)
154+
security_group_id = join("", aws_security_group.managed_master[*].id)
155155
}
156156

157157
resource "aws_security_group" "managed_slave" {
@@ -179,7 +179,7 @@ resource "aws_security_group_rule" "managed_slave_egress" {
179179
protocol = "-1"
180180
cidr_blocks = ["0.0.0.0/0"]
181181
ipv6_cidr_blocks = ["::/0"]
182-
security_group_id = join("", aws_security_group.managed_slave.*.id)
182+
security_group_id = join("", aws_security_group.managed_slave[*].id)
183183
}
184184

185185
resource "aws_security_group" "managed_service_access" {
@@ -205,8 +205,8 @@ resource "aws_security_group_rule" "managed_master_service_access_ingress" {
205205
from_port = 9443
206206
to_port = 9443
207207
protocol = "tcp"
208-
source_security_group_id = join("", aws_security_group.managed_master.*.id)
209-
security_group_id = join("", aws_security_group.managed_service_access.*.id)
208+
source_security_group_id = join("", aws_security_group.managed_master[*].id)
209+
security_group_id = join("", aws_security_group.managed_service_access[*].id)
210210
}
211211

212212
resource "aws_security_group_rule" "managed_service_access_egress" {
@@ -219,7 +219,7 @@ resource "aws_security_group_rule" "managed_service_access_egress" {
219219
protocol = "-1"
220220
cidr_blocks = ["0.0.0.0/0"]
221221
ipv6_cidr_blocks = ["::/0"]
222-
security_group_id = join("", aws_security_group.managed_service_access.*.id)
222+
security_group_id = join("", aws_security_group.managed_service_access[*].id)
223223
}
224224

225225
# Specify additional master and slave security groups
@@ -242,7 +242,7 @@ resource "aws_security_group_rule" "master_ingress_security_groups" {
242242
to_port = 65535
243243
protocol = "tcp"
244244
source_security_group_id = var.master_allowed_security_groups[count.index]
245-
security_group_id = join("", aws_security_group.master.*.id)
245+
security_group_id = join("", aws_security_group.master[*].id)
246246
}
247247

248248
resource "aws_security_group_rule" "master_ingress_cidr_blocks" {
@@ -254,7 +254,7 @@ resource "aws_security_group_rule" "master_ingress_cidr_blocks" {
254254
to_port = 65535
255255
protocol = "tcp"
256256
cidr_blocks = var.master_allowed_cidr_blocks
257-
security_group_id = join("", aws_security_group.master.*.id)
257+
security_group_id = join("", aws_security_group.master[*].id)
258258
}
259259

260260
resource "aws_security_group_rule" "master_egress" {
@@ -266,7 +266,7 @@ resource "aws_security_group_rule" "master_egress" {
266266
to_port = 65535
267267
protocol = "tcp"
268268
cidr_blocks = ["0.0.0.0/0"]
269-
security_group_id = join("", aws_security_group.master.*.id)
269+
security_group_id = join("", aws_security_group.master[*].id)
270270
}
271271

272272
resource "aws_security_group" "slave" {
@@ -288,7 +288,7 @@ resource "aws_security_group_rule" "slave_ingress_security_groups" {
288288
to_port = 65535
289289
protocol = "tcp"
290290
source_security_group_id = var.slave_allowed_security_groups[count.index]
291-
security_group_id = join("", aws_security_group.slave.*.id)
291+
security_group_id = join("", aws_security_group.slave[*].id)
292292
}
293293

294294
resource "aws_security_group_rule" "slave_ingress_cidr_blocks" {
@@ -300,7 +300,7 @@ resource "aws_security_group_rule" "slave_ingress_cidr_blocks" {
300300
to_port = 65535
301301
protocol = "tcp"
302302
cidr_blocks = var.slave_allowed_cidr_blocks
303-
security_group_id = join("", aws_security_group.slave.*.id)
303+
security_group_id = join("", aws_security_group.slave[*].id)
304304
}
305305

306306
resource "aws_security_group_rule" "slave_egress" {
@@ -312,7 +312,7 @@ resource "aws_security_group_rule" "slave_egress" {
312312
to_port = 65535
313313
protocol = "tcp"
314314
cidr_blocks = ["0.0.0.0/0"]
315-
security_group_id = join("", aws_security_group.slave.*.id)
315+
security_group_id = join("", aws_security_group.slave[*].id)
316316
}
317317

318318
/*
@@ -339,7 +339,7 @@ resource "aws_iam_role" "emr" {
339339
count = local.enabled && var.service_role_enabled ? 1 : 0
340340

341341
name = module.label_emr.id
342-
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_emr.*.json)
342+
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_emr[*].json)
343343
permissions_boundary = var.emr_role_permissions_boundary
344344

345345
tags = module.this.tags
@@ -349,7 +349,7 @@ resource "aws_iam_role" "emr" {
349349
resource "aws_iam_role_policy_attachment" "emr" {
350350
count = local.enabled && var.service_role_enabled ? 1 : 0
351351

352-
role = join("", aws_iam_role.emr.*.name)
352+
role = join("", aws_iam_role.emr[*].name)
353353
policy_arn = "arn:${local.aws_partition}:iam::aws:policy/service-role/AmazonElasticMapReduceRole"
354354
}
355355

@@ -379,7 +379,7 @@ resource "aws_iam_role" "ec2" {
379379
count = local.enabled && var.ec2_role_enabled ? 1 : 0
380380

381381
name = module.label_ec2.id
382-
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_ec2.*.json)
382+
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_ec2[*].json)
383383
permissions_boundary = var.ec2_role_permissions_boundary
384384

385385
tags = module.this.tags
@@ -389,7 +389,7 @@ resource "aws_iam_role" "ec2" {
389389
resource "aws_iam_role_policy_attachment" "ec2" {
390390
count = local.enabled && var.ec2_role_enabled ? 1 : 0
391391

392-
role = join("", aws_iam_role.ec2.*.name)
392+
role = join("", aws_iam_role.ec2[*].name)
393393
policy_arn = "arn:${local.aws_partition}:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role"
394394
}
395395

@@ -400,15 +400,15 @@ https://aws.amazon.com/blogs/big-data/securing-access-to-emr-clusters-using-aws-
400400
resource "aws_iam_role_policy_attachment" "emr_ssm_access" {
401401
count = local.enabled && var.ec2_role_enabled && var.enable_ssm_access ? 1 : 0
402402

403-
role = join("", aws_iam_role.ec2.*.name)
403+
role = join("", aws_iam_role.ec2[*].name)
404404
policy_arn = "arn:${local.aws_partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
405405
}
406406

407407
resource "aws_iam_instance_profile" "ec2" {
408408
count = local.enabled && var.ec2_role_enabled ? 1 : 0
409409

410-
name = join("", aws_iam_role.ec2.*.name)
411-
role = join("", aws_iam_role.ec2.*.name)
410+
name = join("", aws_iam_role.ec2[*].name)
411+
role = join("", aws_iam_role.ec2[*].name)
412412
tags = module.this.tags
413413
}
414414

@@ -421,7 +421,7 @@ resource "aws_iam_role" "ec2_autoscaling" {
421421
count = local.enabled && var.ec2_autoscaling_role_enabled ? 1 : 0
422422

423423
name = module.label_ec2_autoscaling.id
424-
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_emr.*.json)
424+
assume_role_policy = join("", data.aws_iam_policy_document.assume_role_emr[*].json)
425425
permissions_boundary = var.ec2_autoscaling_role_permissions_boundary
426426

427427
tags = module.this.tags
@@ -431,7 +431,7 @@ resource "aws_iam_role" "ec2_autoscaling" {
431431
resource "aws_iam_role_policy_attachment" "ec2_autoscaling" {
432432
count = local.enabled && var.ec2_autoscaling_role_enabled ? 1 : 0
433433

434-
role = join("", aws_iam_role.ec2_autoscaling.*.name)
434+
role = join("", aws_iam_role.ec2_autoscaling[*].name)
435435
policy_arn = "arn:${local.aws_partition}:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
436436
}
437437

@@ -445,12 +445,12 @@ resource "aws_emr_cluster" "default" {
445445
ec2_attributes {
446446
key_name = var.key_name
447447
subnet_id = var.subnet_id
448-
emr_managed_master_security_group = var.use_existing_managed_master_security_group == false ? join("", aws_security_group.managed_master.*.id) : var.managed_master_security_group
449-
emr_managed_slave_security_group = var.use_existing_managed_slave_security_group == false ? join("", aws_security_group.managed_slave.*.id) : var.managed_slave_security_group
450-
service_access_security_group = var.use_existing_service_access_security_group == false && var.subnet_type == "private" ? join("", aws_security_group.managed_service_access.*.id) : var.service_access_security_group
451-
instance_profile = var.ec2_role_enabled ? join("", aws_iam_instance_profile.ec2.*.arn) : var.existing_ec2_instance_profile_arn
452-
additional_master_security_groups = var.use_existing_additional_master_security_group == false ? join("", aws_security_group.master.*.id) : var.additional_master_security_group
453-
additional_slave_security_groups = var.use_existing_additional_slave_security_group == false ? join("", aws_security_group.slave.*.id) : var.additional_slave_security_group
448+
emr_managed_master_security_group = var.use_existing_managed_master_security_group == false ? join("", aws_security_group.managed_master[*].id) : var.managed_master_security_group
449+
emr_managed_slave_security_group = var.use_existing_managed_slave_security_group == false ? join("", aws_security_group.managed_slave[*].id) : var.managed_slave_security_group
450+
service_access_security_group = var.use_existing_service_access_security_group == false && var.subnet_type == "private" ? join("", aws_security_group.managed_service_access[*].id) : var.service_access_security_group
451+
instance_profile = var.ec2_role_enabled ? join("", aws_iam_instance_profile.ec2[*].arn) : var.existing_ec2_instance_profile_arn
452+
additional_master_security_groups = var.use_existing_additional_master_security_group == false ? join("", aws_security_group.master[*].id) : var.additional_master_security_group
453+
additional_slave_security_groups = var.use_existing_additional_slave_security_group == false ? join("", aws_security_group.slave[*].id) : var.additional_slave_security_group
454454
}
455455

456456
termination_protection = var.termination_protection
@@ -542,8 +542,8 @@ resource "aws_emr_cluster" "default" {
542542

543543
log_uri = var.log_uri
544544

545-
service_role = var.service_role_enabled ? join("", aws_iam_role.emr.*.arn) : var.existing_service_role_arn
546-
autoscaling_role = var.ec2_autoscaling_role_enabled ? join("", aws_iam_role.ec2_autoscaling.*.arn) : var.existing_ec2_autoscaling_role_arn
545+
service_role = var.service_role_enabled ? join("", aws_iam_role.emr[*].arn) : var.existing_service_role_arn
546+
autoscaling_role = var.ec2_autoscaling_role_enabled ? join("", aws_iam_role.ec2_autoscaling[*].arn) : var.existing_ec2_autoscaling_role_arn
547547

548548
# configurations_json changes are ignored because of terraform bug. Configuration changes are applied via local.bootstrap_action.
549549
lifecycle {
@@ -559,7 +559,7 @@ resource "aws_emr_instance_group" "task" {
559559
count = local.enabled && var.create_task_instance_group ? 1 : 0
560560

561561
name = module.label_task.id
562-
cluster_id = join("", aws_emr_cluster.default.*.id)
562+
cluster_id = join("", aws_emr_cluster.default[*].id)
563563

564564
instance_type = var.task_instance_group_instance_type
565565
instance_count = var.task_instance_group_instance_count
@@ -584,7 +584,7 @@ module "dns_master" {
584584

585585
dns_name = var.master_dns_name != null && var.master_dns_name != "" ? var.master_dns_name : "emr-master-${module.this.name}"
586586
zone_id = var.zone_id
587-
records = coalescelist(aws_emr_cluster.default.*.master_public_dns, [""])
587+
records = coalescelist(aws_emr_cluster.default[*].master_public_dns, [""])
588588

589589
context = module.this.context
590590
}

‎outputs.tf

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1,25 +1,25 @@
11
output "cluster_id" {
2-
value = join("", aws_emr_cluster.default.*.id)
2+
value = join("", aws_emr_cluster.default[*].id)
33
description = "EMR cluster ID"
44
}
55

66
output "cluster_name" {
7-
value = join("", aws_emr_cluster.default.*.name)
7+
value = join("", aws_emr_cluster.default[*].name)
88
description = "EMR cluster name"
99
}
1010

1111
output "master_public_dns" {
12-
value = join("", aws_emr_cluster.default.*.master_public_dns)
12+
value = join("", aws_emr_cluster.default[*].master_public_dns)
1313
description = "Master public DNS"
1414
}
1515

1616
output "master_security_group_id" {
17-
value = join("", aws_security_group.master.*.id)
17+
value = join("", aws_security_group.master[*].id)
1818
description = "Master security group ID"
1919
}
2020

2121
output "slave_security_group_id" {
22-
value = join("", aws_security_group.slave.*.id)
22+
value = join("", aws_security_group.slave[*].id)
2323
description = "Slave security group ID"
2424
}
2525

@@ -29,6 +29,6 @@ output "master_host" {
2929
}
3030

3131
output "ec2_role" {
32-
value = var.ec2_role_enabled ? join("", aws_iam_role.ec2.*.name) : null
32+
value = var.ec2_role_enabled ? join("", aws_iam_role.ec2[*].name) : null
3333
description = "Role name of EMR EC2 instances so users can attach more policies"
3434
}

0 commit comments

Comments (0)