This repository has been archived by the owner on Nov 28, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ec2_resources.tf
202 lines (184 loc) · 7.71 KB
/
ec2_resources.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
# All availability zones visible to the caller's account/region.
# NOTE(review): not referenced anywhere in this file — presumably consumed by
# another .tf file in the module; confirm before removing.
data "aws_availability_zones" "all" {}
# Latest Amazon-published Amazon Linux 2 AMI (x86_64, gp2). Used as the
# default image for the launch configuration when `var.ami_id` is empty.
data "aws_ami" "amazon_linux_2" {
  owners      = ["amazon"]
  most_recent = true

  filter {
    name   = "name"
    values = ["amzn2-ami-hvm-2.0.*-x86_64-gp2"]
  }
}
resource "aws_launch_configuration" "cyral-sidecar-lc" {
  # Launch configuration for sidecar instances that will run containers
  name_prefix = "${var.name_prefix}-autoscaling-"
  # Fall back to the latest Amazon Linux 2 AMI when no explicit AMI is given.
  image_id                    = var.ami_id != "" ? var.ami_id : data.aws_ami.amazon_linux_2.id
  instance_type               = var.instance_type
  iam_instance_profile        = aws_iam_instance_profile.sidecar_profile.name
  key_name                    = var.key_name
  associate_public_ip_address = var.associate_public_ip_address
  security_groups = concat(
    [aws_security_group.instance.id],
    var.additional_security_groups
  )

  metadata_options {
    # So docker can access ec2 metadata
    # see https://github.com/aws/aws-sdk-go/issues/2972
    http_endpoint = "enabled"
    # NOTE(review): "optional" still allows IMDSv1; consider "required" once
    # all containers are confirmed IMDSv2-capable (behavior change, not made here).
    http_tokens                 = "optional"
    http_put_response_hop_limit = 2
  }

  root_block_device {
    delete_on_termination = true
    encrypted             = true
    volume_size           = var.volume_size
    volume_type           = "gp2"
  }

  # Boot script: runs custom/pre hooks, downloads the sidecar compose template
  # from the control plane, extracts the container-registry key from Secrets
  # Manager, and appends a registry login snippet to ec2-user's .bash_profile.
  # `lookup` now defaults to "" so a custom_user_data map missing the
  # "pre"/"post" key no longer fails the plan with a lookup error.
  user_data = <<-EOT
    #!/bin/bash -xe
    ${lookup(var.custom_user_data, "pre", "")}
    ${local.cloud_init_pre}
    echo "Downloading sidecar.compose.yaml..."
    function download_sidecar () {
      local url="${local.protocol}://${var.control_plane}/deploy/sidecar.compose.yaml?TemplateVersion=${var.sidecar_version}&TemplateType=terraform&LogIntegration=${var.log_integration}&MetricsIntegration=${var.metrics_integration}&HCVaultIntegrationID=${var.hc_vault_integration_id}&WiresEnabled=${join(",", var.repositories_supported)}"
      echo "Trying to download the sidecar template from: $url"
      if [[ $(${local.curl} -s -o /home/ec2-user/sidecar.compose.yaml -w "%%{http_code}" -L "$url") = 200 ]]; then
        return 0
      fi
      return 1
    }
    retry download_sidecar
    echo "Fetching secrets..."
    aws secretsmanager get-secret-value --secret-id ${var.secrets_location} --query SecretString --output text \
      --region ${data.aws_region.current.name} | jq -r 'select(.containerRegistryKey != null) | .containerRegistryKey' | base64 --decode > /home/ec2-user/cyral/container_registry_key.json
    until [ -f /home/ec2-user/cyral/container_registry_key.json ]; do echo "wait"; sleep 1; done
    cat >> /home/ec2-user/.bash_profile << EOF
    if [[ ${var.container_registry} == *".amazonaws.com"* ]]; then
      echo "Logging in to AWS ECR..."
      eval $(aws ecr --no-include-email get-login --region ${data.aws_region.current.name})
    elif [ -s /home/ec2-user/cyral/container_registry_key.json ]; then
      echo "Logging in to GCR..."
      cat /home/ec2-user/cyral/container_registry_key.json | docker login -u ${var.container_registry_username} --password-stdin https://gcr.io
    else
      echo "Won't log in automatically to any image registry. Image registry set to: ${var.container_registry}"
    fi
    EOF
    ${local.cloud_init_post}
    ${lookup(var.custom_user_data, "post", "")}
    EOT

  lifecycle {
    create_before_destroy = true
  }
}
resource "aws_autoscaling_group" "cyral-sidecar-asg" {
  # Autoscaling group of immutable sidecar instances
  count                     = var.asg_count
  name                      = "${var.name_prefix}-asg"
  launch_configuration      = aws_launch_configuration.cyral-sidecar-lc.id
  vpc_zone_identifier       = var.subnets
  min_size                  = var.asg_min
  desired_capacity          = var.asg_desired
  max_size                  = var.asg_max
  health_check_grace_period = var.health_check_grace_period
  # Instances are replaced when the load balancer health check fails, not
  # just on EC2 status-check failure.
  health_check_type = "ELB"
  # Register every sidecar port's target group. Use the documented `arn`
  # attribute (for aws_lb_target_group, `id` happens to equal the ARN, but
  # `arn` states the intent and matches the listener resource).
  target_group_arns = [for tg in aws_lb_target_group.cyral-sidecar-tg : tg.arn]

  tag {
    key                 = "Name"
    value               = "${var.name_prefix}-instance"
    propagate_at_launch = true
  }

  tag {
    key                 = "SidecarVersion"
    value               = var.sidecar_version
    propagate_at_launch = true
  }

  # Delete existing hosts before starting a new one
  lifecycle {
    create_before_destroy = false
  }
}
# Security group attached to every sidecar instance: optional SSH inbound,
# database (sidecar) ports inbound, healthcheck inbound, all traffic outbound.
resource "aws_security_group" "instance" {
  name   = "${var.name_prefix}-instance"
  vpc_id = var.vpc_id

  # Allow SSH inbound
  # Rendered once only when at least one SSH source (CIDR or SG) is configured;
  # otherwise the for_each is empty and no SSH rule is created.
  dynamic "ingress" {
    for_each = (length(var.ssh_inbound_cidr) > 0 || length(var.ssh_inbound_security_group) > 0) ? [1] : []
    content {
      description     = "SSH"
      from_port       = 22
      to_port         = 22
      protocol        = "tcp"
      cidr_blocks     = var.ssh_inbound_cidr
      security_groups = var.ssh_inbound_security_group
    }
  }

  # If reduce_security_group_rules_count is true, it will create DB Inbound Rules per CIDR using
  # a port range (between the smallest and the biggest sidecar port). Otherwise, it will
  # create DB Inbound Rules per sidecar port and CIDR (Cartesian Product between Ports x CIDRs).
  # Notice that the ingress block accepts a list of CIDRs (cidr_blocks), which internally will
  # create one ingress rule per CIDR. This is an AWS limitation, which doesn't allow creating a
  # single ingress rule for a list of CIDRs.
  dynamic "ingress" {
    # [1] renders the block exactly once (range rule); var.sidecar_ports
    # renders it once per port, with ingress.value bound to that port.
    for_each = var.reduce_security_group_rules_count ? [1] : var.sidecar_ports
    content {
      description     = "DB"
      from_port       = var.reduce_security_group_rules_count ? min(var.sidecar_ports...) : ingress.value
      to_port         = var.reduce_security_group_rules_count ? max(var.sidecar_ports...) : ingress.value
      protocol        = "tcp"
      cidr_blocks     = var.db_inbound_cidr
      security_groups = var.db_inbound_security_group
    }
  }

  # Allow healthcheck inbound
  ingress {
    description = "Sidecar - Healthcheck"
    from_port   = var.healthcheck_port
    to_port     = var.healthcheck_port
    protocol    = "tcp"
    # A network load balancer has no security group:
    # https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#target-security-groups
    cidr_blocks = var.healthcheck_inbound_cidr # TODO - change this to LB IP only
  }

  # Allow all outbound
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Network load balancer fronting the sidecar autoscaling group.
resource "aws_lb" "cyral-lb" {
  name               = "${var.name_prefix}-lb"
  load_balancer_type = "network"
  # Internal unless the scheme explicitly asks for internet-facing.
  internal = var.load_balancer_scheme != "internet-facing"
  # Prefer dedicated LB subnets when given; otherwise share the instance subnets.
  subnets = length(var.load_balancer_subnets) > 0 ? var.load_balancer_subnets : var.subnets

  enable_cross_zone_load_balancing = var.enable_cross_zone_load_balancing
}
# One TCP target group per sidecar port, keyed by the stringified port.
resource "aws_lb_target_group" "cyral-sidecar-tg" {
  for_each = { for port in var.sidecar_ports : tostring(port) => port }

  name     = "${var.name_prefix}-tg${each.value}"
  port     = each.value
  protocol = "TCP"
  vpc_id   = var.vpc_id

  # Instances drop out immediately on deregistration (no connection draining).
  deregistration_delay = 0

  # Health is probed on the dedicated healthcheck port, not the traffic port.
  health_check {
    port     = var.healthcheck_port
    protocol = "TCP"
  }

  # Source-IP stickiness only for ports explicitly listed as sticky.
  stickiness {
    type    = "source_ip"
    enabled = contains(var.load_balancer_sticky_ports, each.value)
  }
}
# One load-balancer listener per sidecar port, forwarding to the matching
# target group. Ports listed in load_balancer_tls_ports (e.g. Snowflake)
# terminate TLS with the provided certificate; all others pass through as TCP.
resource "aws_lb_listener" "cyral-sidecar-lb-ls" {
  for_each = { for port in var.sidecar_ports : tostring(port) => port }

  load_balancer_arn = aws_lb.cyral-lb.arn
  port              = each.value
  protocol          = contains(var.load_balancer_tls_ports, tonumber(each.value)) == false ? "TCP" : "TLS"
  # Only TLS listeners carry a certificate; TCP listeners must pass null.
  certificate_arn = contains(var.load_balancer_tls_ports, tonumber(each.value)) == false ? null : var.load_balancer_certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.cyral-sidecar-tg[each.key].arn
  }
}