From 35043b3acef7280138d481b5fd56362f8a40b4a8 Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Tue, 16 Aug 2022 01:08:00 +0800 Subject: [PATCH 01/11] Full upgrade in progress to Infra v3 --- .dockerignore | 2 - .gitignore | 73 +- .hadolint.yml | 13 - .pre-commit-config.yaml | 7 +- .terraform.lock.hcl | 164 --- .tfsec.yml | 1 + LICENSE | 695 ++++++++++- README.md | 120 +- VERSION | 2 +- aws-cluster-autoscaler/.gitignore | 2 + .../README.md | 0 .../cluster-autoscaler.tf | 2 +- .../iam-policy.tf | 10 +- .../locals.tf | 0 aws-cluster-autoscaler/variables.tf | 28 + aws-support/.gitignore | 2 + .../aws-support => aws-support}/data.tf | 0 aws-support/kms-secrets-access.tf | 66 + aws-support/route53.cert-manager.tf | 48 + aws-support/route53.external-dns.tf | 20 + aws-support/s3-infrastructure-buckets.tf | 88 ++ .../storage-class.efs.security-groups.tf | 13 +- .../storage-class.efs.tf | 11 +- .../storage-class.gp2-retain.tf | 1 + .../storage-class.gp3.tf | 5 +- aws-support/storage-class.iam-policies.tf | 26 + .../storage-class.st1.tf | 1 + .../aws-support => aws-support}/variables.tf | 8 + cluster-aws-auth-cm.tf | 15 + cluster-aws.tf | 42 + cluster-nodegroups.tf | 70 ++ cluster.tf | 140 ++- custom_node_groups.tf | 50 - examples/basic/data.tf | 1 + examples/basic/main.tf | 68 +- examples/basic/providers.tf | 28 +- examples/basic/vars.tf | 11 + examples/basic/versions.tf | 28 + examples/extended-solution-gitlab/data.tf | 1 + .../gitlab-management/gitlab-admin.tf | 79 -- .../gitlab-management/vars.tf | 19 - .../gitlab-management/versions.tf | 14 - examples/extended-solution-gitlab/main.tf | 13 - .../extended-solution-gitlab/providers.tf | 27 +- examples/full-solution-k8s/data.tf | 1 + examples/full-solution-k8s/main.tf | 70 ++ examples/full-solution-k8s/providers.tf | 33 + examples/full-solution-k8s/vars.tf | 19 + .../datadog-infrastructure.tf | 34 - .../datadog-dashboard/src/values.v2.8.3.yaml | 1110 ----------------- examples/optional-components-k8s/main.tf | 16 - 
examples/optional-components-k8s/providers.tf | 3 - examples/optional-components-k8s/vars.tf | 13 - examples/standard/data.tf | 1 + examples/standard/providers.tf | 20 +- kubernetes-helm.tf | 130 ++ kubernetes-namespaces.tf | 14 + kubernetes-secret.tf | 41 + kubernetes.tf | 195 --- locals.tf | 69 +- outputs.tf | 28 +- provider.tf | 9 + provisioning/kubernetes/argocd/argocd.tf | 41 + provisioning/kubernetes/argocd/data.tf | 71 ++ provisioning/kubernetes/argocd/locals.tf | 22 + .../kubernetes/argocd/registry-secrets.tf | 26 + .../argocd/repository-credential-templates.tf | 17 + .../kubernetes/argocd/repository-secrets.tf | 32 + provisioning/kubernetes/argocd/variables.tf | 10 + provisioning/kubernetes/argocd/version.tf | 9 + provisioning/kubernetes/aws-support/README.md | 19 - .../efs-service-account-values.yaml | 10 - .../aws-support/efs-serviceaccount.tf | 14 - .../kubernetes/aws-support/iam-policy.efs.tf | 74 -- .../kubernetes/aws-support/iam-policy.gp3.tf | 191 --- .../src/autoscaler-iam-policy.json | 18 - .../aws-support/src/ebs-storage-class.yml | 6 - .../aws-support/src/efs-storage-class.yml | 8 - .../kubernetes/certmanager/certmanager.tf | 13 +- .../{src => issuer_examples}/.gitignore | 0 .../develop/kubectl-apply-example.sh | 0 .../develop/kubernetes-cert-management.yaml | 0 .../prod/kubernetes-cert-management.yaml | 0 .../prod2/kubernetes-cert-management.yaml | 0 .../stag/kubernetes-cert-management.yaml | 0 .../cluster-autoscaler/variables.tf | 10 - .../elastic-stack/elasticstack-kibana.tf | 1 + .../elastic-stack/elasticstack-logstash.tf | 4 +- provisioning/kubernetes/elastic-stack/s3.tf | 10 +- provisioning/kubernetes/grafana/grafana.tf | 2 +- provisioning/kubernetes/grafana/variables.tf | 6 +- .../kubernetes/hashicorp-consul/consul.tf | 9 +- .../kubernetes/hashicorp-consul/variables.tf | 11 + .../kubernetes/hashicorp-vault/variables.tf | 14 + .../hashicorp-vault/vault-aws-kms.tf | 8 +- .../kubernetes/hashicorp-vault/vault.tf | 13 +- 
.../kubernetes-dashboard.tf | 5 +- .../kubernetes-dashboard-admin.rbac.yaml | 19 - .../kubernetes-dashboard-recommended.yaml | 302 ----- .../kubernetes-dashboard/variables.tf | 5 + .../kubernetes/nginx-controller/domain.tf | 108 +- .../nginx-controller/nginx-controller.tf | 3 +- .../kubernetes/nginx-controller/variables.tf | 11 +- route53.tf | 6 + tools/gitlab-terraform.sh | 9 - variables.tf | 394 ++++-- versions.tf | 13 +- vpc_infrastructure.tf | 96 +- 108 files changed, 2564 insertions(+), 2896 deletions(-) delete mode 100644 .dockerignore delete mode 100644 .hadolint.yml delete mode 100644 .terraform.lock.hcl create mode 100644 aws-cluster-autoscaler/.gitignore rename {provisioning/kubernetes/cluster-autoscaler => aws-cluster-autoscaler}/README.md (100%) rename {provisioning/kubernetes/cluster-autoscaler => aws-cluster-autoscaler}/cluster-autoscaler.tf (97%) rename {provisioning/kubernetes/cluster-autoscaler => aws-cluster-autoscaler}/iam-policy.tf (86%) rename {provisioning/kubernetes/cluster-autoscaler => aws-cluster-autoscaler}/locals.tf (100%) create mode 100644 aws-cluster-autoscaler/variables.tf create mode 100644 aws-support/.gitignore rename {provisioning/kubernetes/aws-support => aws-support}/data.tf (100%) create mode 100644 aws-support/kms-secrets-access.tf create mode 100644 aws-support/route53.cert-manager.tf create mode 100644 aws-support/route53.external-dns.tf create mode 100644 aws-support/s3-infrastructure-buckets.tf rename provisioning/kubernetes/aws-support/efs-sg.tf => aws-support/storage-class.efs.security-groups.tf (52%) rename {provisioning/kubernetes/aws-support => aws-support}/storage-class.efs.tf (74%) rename {provisioning/kubernetes/aws-support => aws-support}/storage-class.gp2-retain.tf (78%) rename {provisioning/kubernetes/aws-support => aws-support}/storage-class.gp3.tf (83%) create mode 100644 aws-support/storage-class.iam-policies.tf rename {provisioning/kubernetes/aws-support => aws-support}/storage-class.st1.tf (75%) rename 
{provisioning/kubernetes/aws-support => aws-support}/variables.tf (55%) create mode 100644 cluster-aws-auth-cm.tf create mode 100644 cluster-aws.tf create mode 100644 cluster-nodegroups.tf delete mode 100644 custom_node_groups.tf create mode 100644 examples/basic/data.tf create mode 100644 examples/basic/versions.tf create mode 100644 examples/extended-solution-gitlab/data.tf delete mode 100755 examples/extended-solution-gitlab/gitlab-management/gitlab-admin.tf delete mode 100644 examples/extended-solution-gitlab/gitlab-management/vars.tf delete mode 100644 examples/extended-solution-gitlab/gitlab-management/versions.tf create mode 100644 examples/full-solution-k8s/data.tf create mode 100644 examples/full-solution-k8s/main.tf create mode 100644 examples/full-solution-k8s/providers.tf create mode 100644 examples/full-solution-k8s/vars.tf delete mode 100644 examples/optional-components-k8s/datadog-dashboard/datadog-infrastructure.tf delete mode 100644 examples/optional-components-k8s/datadog-dashboard/src/values.v2.8.3.yaml delete mode 100644 examples/optional-components-k8s/main.tf delete mode 100644 examples/optional-components-k8s/providers.tf delete mode 100644 examples/optional-components-k8s/vars.tf create mode 100644 examples/standard/data.tf create mode 100644 kubernetes-helm.tf create mode 100644 kubernetes-namespaces.tf create mode 100644 kubernetes-secret.tf delete mode 100644 kubernetes.tf create mode 100644 provider.tf create mode 100644 provisioning/kubernetes/argocd/argocd.tf create mode 100644 provisioning/kubernetes/argocd/data.tf create mode 100644 provisioning/kubernetes/argocd/locals.tf create mode 100644 provisioning/kubernetes/argocd/registry-secrets.tf create mode 100644 provisioning/kubernetes/argocd/repository-credential-templates.tf create mode 100644 provisioning/kubernetes/argocd/repository-secrets.tf create mode 100644 provisioning/kubernetes/argocd/variables.tf create mode 100644 provisioning/kubernetes/argocd/version.tf delete mode 
100644 provisioning/kubernetes/aws-support/README.md delete mode 100644 provisioning/kubernetes/aws-support/efs-service-account-values.yaml delete mode 100644 provisioning/kubernetes/aws-support/efs-serviceaccount.tf delete mode 100644 provisioning/kubernetes/aws-support/iam-policy.efs.tf delete mode 100644 provisioning/kubernetes/aws-support/iam-policy.gp3.tf delete mode 100644 provisioning/kubernetes/aws-support/src/autoscaler-iam-policy.json delete mode 100644 provisioning/kubernetes/aws-support/src/ebs-storage-class.yml delete mode 100644 provisioning/kubernetes/aws-support/src/efs-storage-class.yml rename provisioning/kubernetes/certmanager/{src => issuer_examples}/.gitignore (100%) rename provisioning/kubernetes/certmanager/{src/environments => issuer_examples}/develop/kubectl-apply-example.sh (100%) rename provisioning/kubernetes/certmanager/{src/environments => issuer_examples}/develop/kubernetes-cert-management.yaml (100%) rename provisioning/kubernetes/certmanager/{src/environments => issuer_examples}/prod/kubernetes-cert-management.yaml (100%) rename provisioning/kubernetes/certmanager/{src/environments => issuer_examples}/prod2/kubernetes-cert-management.yaml (100%) rename provisioning/kubernetes/certmanager/{src/environments => issuer_examples}/stag/kubernetes-cert-management.yaml (100%) delete mode 100644 provisioning/kubernetes/cluster-autoscaler/variables.tf create mode 100644 provisioning/kubernetes/hashicorp-consul/variables.tf create mode 100644 provisioning/kubernetes/hashicorp-vault/variables.tf delete mode 100755 provisioning/kubernetes/kubernetes-dashboard/legacy/kubernetes-dashboard-admin.rbac.yaml delete mode 100755 provisioning/kubernetes/kubernetes-dashboard/legacy/kubernetes-dashboard-recommended.yaml create mode 100644 provisioning/kubernetes/kubernetes-dashboard/variables.tf create mode 100644 route53.tf delete mode 100755 tools/gitlab-terraform.sh diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 
08a60b6..0000000 --- a/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -.terraform/**/* -**/*/.terraform \ No newline at end of file diff --git a/.gitignore b/.gitignore index 11dcd1d..c9adb71 100755 --- a/.gitignore +++ b/.gitignore @@ -1,27 +1,80 @@ -# Kubernetes +################################## +#### ------------------------ #### +#### FOSS - GITIGNORE #### +#### v1.0.1 #### +#### ------------------------ #### +################################## +### -- +### Logs +### -- +logs +*.log + +### -- +### Runtime +### -- +pids +*.pid +*.seed + +### -- +### Testing Dependencies +### -- +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov +# Coverage directory used by tools like istanbul +coverage + +### -- +### Build Dependencies +### -- +# node-waf configuration +.lock-wscript +# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt +# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git +node_modules +*.pyc + + +### -- +### Compiled binary addons (http://nodejs.org/api/addons.html) +### -- +build +build/Release +dist + +### -- +### Kubernetes +### -- .kubeconfig* -values.overrides* -# Terraform +### -- +### Terraform +### -- *.tfvars *.tfstate* +.terragrunt-cache/ .terraform/ .infracost/ terraform.tfstate.d/ -# Language and Compiler Files -*.pyc - -# IDE Files +### -- +### Dev Dependencies +### -- .idea -# Others -*.log +### -- +### Others +### -- *.vault* *.pem *.gpg .DS_Store -# Environment +### -- +### Environment +### -- .env* +.env.yaml !.env.example diff --git a/.hadolint.yml b/.hadolint.yml deleted file mode 100644 index 157f549..0000000 --- a/.hadolint.yml +++ /dev/null @@ -1,13 +0,0 @@ -ignored: - - DL3006 # Version tagged dynamically for multiple kubectl versions - - DL3018 # Always using latest - - DL3007 # Always using latest - - DL3003 # Ignoring WORKDIR rule as we are CD'ing post clone - - DL4006 # Disabling edge case "SHELL option -o" check - - DL4001 # 
Using CURL + WGET - - DL3013 # PIP Versions not pinned for pre-commit package, no need - - DL3059 # Multiple RUN Commands. Will not remove - -trustedRegistries: - - docker.io - - registry.gitlab.com diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 29a3807..5524e9e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,9 +1,10 @@ -exclude: ^examples/ +exclude: (^examples/|archive/) repos: - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.62.3 + rev: v1.72.1 hooks: - id: terraform_fmt - id: terraform_docs - id: terraform_validate - - id: terraform_tflint \ No newline at end of file + - id: terraform_tflint + - id: terraform_tfsec \ No newline at end of file diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl deleted file mode 100644 index 26151c7..0000000 --- a/.terraform.lock.hcl +++ /dev/null @@ -1,164 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/gitlabhq/gitlab" { - version = "3.8.0" - constraints = "~> 3.4" - hashes = [ - "h1:ESZehCeHY3G5tCrL47HH+0W18Mt/IYslwaadGfpucU8=", - "h1:MOSn/Gg9RF+wkUVy9gzPiQb2YBFDy8BNJCMSihZAafU=", - "zh:02d977e6803ff336054194e567fcd11d095846346a604017afef98ada0353bc1", - "zh:07294f6061db86c1ed44e5eae01576d76c585128e334761feb01fca5b866d53f", - "zh:1cfa4cbbdb9a8a235bb82e7f3968982993750fc5a95bd948b9dfd6de772bff0a", - "zh:3f9172206c00de224e3e234503681c29f918e2b83dbf5b11c0e791694600bec6", - "zh:71174040a680be48f65b63fb5571bdd76cd17de830c88b81b6e684b56d77616c", - "zh:81b1c86b8b59c4f3faed59d461555ce53d86b12f9fc91a2afaee89ca3992f154", - "zh:a09d2012b4392c01a310713c170db1d7e211190b0e2a27e43ec2f7ca59b6d6c9", - "zh:a97ed255d3d161313370fc34e83018216dcf198e7a5b285abe31b26bf8b048f3", - "zh:abfad232ec29ecea620e087d20f1586d0b5b4fc02a6369a6b0c65e80b8de3566", - "zh:c8814ae34933a082bf68db958ed6ef708321093ce9901a11e2efceaa5b3ed031", - 
"zh:e5a52eaada2797fbec6aca19bc7a74cdb40dc1625df999f6a93c34690ea2bc80", - "zh:f4e1787f9fcab92824dad41486eaa6895a789c2d25f3d3f3126d134bd5edfe90", - "zh:f90b196e6ff444fe721603765a5be5778bc92d45458009bc77ee6d1b6c32bf67", - ] -} - -provider "registry.terraform.io/hashicorp/aws" { - version = "3.74.0" - constraints = ">= 2.23.0, >= 2.50.0, >= 3.28.0, >= 3.40.0, >= 3.56.0, ~> 3.58, >= 3.63.0, ~> 3.69" - hashes = [ - "h1:YNOblHBUf+XTjGTfIIsAMGp4weXB+tmQrMPCrpmM1/U=", - "h1:wIP53ozevE0ihhP1Fuoir4N1qI7+TcBs0y4SHlwMxho=", - "zh:00767509c13c0d1c7ad6af702c6942e6572aa6d529b40a00baacc0e73faafea2", - "zh:03aafdc903ad49c2eda03889f927f44212674c50e475a9c6298850381319eec2", - "zh:2de8a6a97b180f909d652f215125aa4683e99db15fcf3b28d62e3d542f875ed6", - "zh:3ac29ebc3af99028f4230a79f56606a0c2954b68767bd749b921a76eb4f3bd30", - "zh:50add2e2d118a15a644360eabc5a34cec59f2560b491f8fabf9c52ab83ca7b09", - "zh:85dd8e81910ab79f841a4a595fdd8ac358fbfe460956144afb0be3d81f91fe10", - "zh:895de83d0f0941fde31bfc53fa6b1ea276901f006bec221bbdee4771a04f3693", - "zh:a15c9724aac52d1ba5001d2d83e42843099b52b1638ea29d84e20be0f45fa4f1", - "zh:c982a64463bd73e9bff2589de214b1de0a571438d9015001f9eae45cfc3a2559", - "zh:e9ef973c18078324e43213ea1252c12b9441e566bf054ddfdbff5dd62f3035d9", - "zh:f297e705b0f339c8baa27ae70db5df9aa6578adfe1ea3d2ba8edc186512464eb", - ] -} - -provider "registry.terraform.io/hashicorp/cloudinit" { - version = "2.2.0" - hashes = [ - "h1:Id6dDkpuSSLbGPTdbw49bVS/7XXHu/+d7CJoGDqtk5g=", - "h1:tQLNREqesrdCQ/bIJnl0+yUK+XfdWzAG0wo4lp10LvM=", - "zh:76825122171f9ea2287fd27e23e80a7eb482f6491a4f41a096d77b666896ee96", - "zh:795a36dee548e30ca9c9d474af9ad6d29290e0a9816154ad38d55381cd0ab12d", - "zh:9200f02cb917fb99e44b40a68936fd60d338e4d30a718b7e2e48024a795a61b9", - "zh:a33cf255dc670c20678063aa84218e2c1b7a67d557f480d8ec0f68bc428ed472", - "zh:ba3c1b2cd0879286c1f531862c027ec04783ece81de67c9a3b97076f1ce7f58f", - "zh:bd575456394428a1a02191d2e46af0c00e41fd4f28cfe117d57b6aeb5154a0fb", - 
"zh:c68dd1db83d8437c36c92dc3fc11d71ced9def3483dd28c45f8640cfcd59de9a", - "zh:cbfe34a90852ed03cc074601527bb580a648127255c08589bc3ef4bf4f2e7e0c", - "zh:d6ffd7398c6d1f359b96f5b757e77b99b339fbb91df1b96ac974fe71bc87695c", - "zh:d9c15285f847d7a52df59e044184fb3ba1b7679fd0386291ed183782683d9517", - "zh:f7dd02f6d36844da23c9a27bb084503812c29c1aec4aba97237fec16860fdc8c", - ] -} - -provider "registry.terraform.io/hashicorp/helm" { - version = "2.4.1" - constraints = "~> 2.0" - hashes = [ - "h1:Gqwrr+yKWR79esN39X9eRCddxMNapmaGMynLfjrUJJo=", - "h1:aFvUq5HOEwFV/3e7DGU45zDf6j2SThDRjaCAeY2Qfss=", - "zh:07517b24ea2ce4a1d3be3b88c3efc7fb452cd97aea8fac93ca37a08a8ec06e14", - "zh:11ef6118ed03a1b40ff66adfe21b8707ece0568dae1347ddfbcff8452c0655d5", - "zh:1ae07e9cc6b088a6a68421642c05e2fa7d00ed03e9401e78c258cf22a239f526", - "zh:1c5b4cd44033a0d7bf7546df930c55aa41db27b70b3bca6d145faf9b9a2da772", - "zh:256413132110ddcb0c3ea17c7b01123ad2d5b70565848a77c5ccc22a3f32b0dd", - "zh:4ab46fd9aadddef26604382bc9b49100586647e63ef6384e0c0c3f010ff2f66e", - "zh:5a35d23a9f08c36fceda3cef7ce2c7dc5eca32e5f36494de695e09a5007122f0", - "zh:8e9823a1e5b985b63fe283b755a821e5011a58112447d42fb969c7258ed57ed3", - "zh:8f79722eba9bf77d341edf48a1fd51a52d93ec31d9cac9ba8498a3a061ea4a7f", - "zh:b2ea782848b10a343f586ba8ee0cf4d7ff65aa2d4b144eea5bbd8f9801b54c67", - "zh:e72d1ccf8a75d8e8456c6bb4d843fd4deb0e962ad8f167fa84cf17f12c12304e", - ] -} - -provider "registry.terraform.io/hashicorp/kubernetes" { - version = "2.7.1" - constraints = ">= 1.11.1, ~> 2.0" - hashes = [ - "h1:4LPAvfuRY3Kw9bycdOttVqxYryg1DThYhAr9D8jZ+fo=", - "h1:Df9MZxqgXueXVObeAiPPDQ5aLwQ2bJ2r1gul/IYSxeg=", - "zh:0da320fd81ece6696f7cceda35e459ee97cae8955088af38fc7f2feab1dce924", - "zh:37d304b8b992518c9c12e8f10437b9d4a0cc5a823c9421ac794ad2347c4d1122", - "zh:3d4e12fb9588c3b2e782d392fea758c6982e5d653154bec951e949155bcbc169", - "zh:6bb32b8d5cccf3e3ae7c124ed27df76dc7653ca760c132addeee15272630c930", - "zh:94775153b90e285876fc17261e8f5338a1ff732f4133336cc68754acb74570b6", - 
"zh:a665d1336765cdf8620a8797fd4e7e3cecf789e96e59ba80634336a4390df377", - "zh:aa8b35e9958cb89f01c115e8866a07d5468fb53f1c227d673e94f7ee8fb76242", - "zh:b7a571336387d773a74ed6eefa3843ff78d3662f2745c99c95008002a1341662", - "zh:c50d661782175d50ea4952fe943b0e4a3e33c27aa69e5ff21b3cbfa513e90d0a", - "zh:e0999b349cc772c75876adbc2a13b5dc256d3ecd7e4aa91baee5fdfcecaa7465", - "zh:e1399aec06a7aa98e9b0f64b4281697247f338a8a40b79f5f6ebfd43bf4ce1e2", - ] -} - -provider "registry.terraform.io/hashicorp/local" { - version = "2.1.0" - constraints = ">= 1.4.0" - hashes = [ - "h1:EYZdckuGU3n6APs97nS2LxZm3dDtGqyM4qaIvsmac8o=", - "h1:PaQTpxHMbZB9XV+c1od1eaUvndQle3ZZHx79hrI6C3k=", - "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2", - "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab", - "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3", - "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a", - "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe", - "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1", - "zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c", - "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4", - "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b", - "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3", - "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91", - ] -} - -provider "registry.terraform.io/hashicorp/random" { - version = "3.1.0" - hashes = [ - "h1:9cCiLO/Cqr6IUvMDSApCkQItooiYNatZpEXmcu0nnng=", - "h1:BZMEPucF+pbu9gsPk0G0BHx7YP04+tKdq2MrRDF1EDM=", - "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc", - "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626", - "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff", - "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2", - 
"zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992", - "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427", - "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc", - "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f", - "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b", - "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7", - "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a", - ] -} - -provider "registry.terraform.io/terraform-aws-modules/http" { - version = "2.4.1" - constraints = ">= 2.4.1" - hashes = [ - "h1:ZnkXcawrIr611RvZpoDzbtPU7SVFyHym+7p1t+PQh20=", - "h1:fHqAXle/P/fT2k+HEyTqYVE+/RvpQAaBr6xXZgM66es=", - "zh:0111f54de2a9815ded291f23136d41f3d2731c58ea663a2e8f0fef02d377d697", - "zh:0740152d76f0ccf54f4d0e8e0753739a5233b022acd60b5d2353d248c4c17204", - "zh:569518f46809ec9cdc082b4dfd4e828236eee2b50f87b301d624cfd83b8f5b0d", - "zh:7669f7691de91eec9f381e9a4be81aa4560f050348a86c6ea7804925752a01bb", - "zh:81cd53e796ec806aca2d8e92a2aed9135661e170eeff6cf0418e54f98816cd05", - "zh:82f01abd905090f978b169ac85d7a5952322a5f0f460269dd981b3596652d304", - "zh:9a235610066e0f7e567e69c23a53327271a6fc568b06bf152d8fe6594749ed2b", - "zh:aeabdd8e633d143feb67c52248c85358951321e35b43943aeab577c005abd30a", - "zh:c20d22dba5c79731918e7192bc3d0b364d47e98a74f47d287e6cc66236bc0ed0", - "zh:c4fea2cb18c31ed7723deec5ebaff85d6795bb6b6ed3b954794af064d17a7f9f", - "zh:e21e88b6e7e55b9f29b046730d9928c65a4f181fd5f60a42f1cd41b46a0a938d", - "zh:eddb888a74dea348a0acdfee13a08875bacddde384bd9c28342a534269665568", - "zh:f46d5f1403b8d8dfafab9bdd7129d3080bb62a91ea726f477fd43560887b8c4a", - ] -} diff --git a/.tfsec.yml b/.tfsec.yml index 09de779..87834d9 100644 --- a/.tfsec.yml +++ b/.tfsec.yml @@ -9,3 +9,4 @@ exclude: - AWS082 # FALSE POSITIVE | "Default VPC" is actually not used - AWS077 # FALSE POSITIVE | No need to version S3 web files - AWS098 # TODO:Public Access Block for S3? 
+ - AWS097 ## TODO: Restrict resources to cluster-associated only. \ No newline at end of file diff --git a/LICENSE b/LICENSE index 66f382c..ef7e7ef 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,674 @@ -The MIT License (MIT) - -Copyright (c) 2021 Aaron Baideme - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/README.md b/README.md index 36a2efa..590c90e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # infrastructure-terraform-eks -[![LICENSE](https://img.shields.io/badge/license-Apache_2-blue)](https://opensource.org/licenses/Apache-2.0) +[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) A custom-build terraform module, leveraging terraform-aws-eks to create a managed Kubernetes cluster on AWS EKS. In addition to provisioning simply an EKS cluster, this module alongside additional components to complete an entire end-to-end base stack for a functional kubernetes cluster for development and production level environments, including a base set of software that can/should be commonly used across all clusters. Primary integrated sub-modules include: - [AWS EKS Terraform Module](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws) @@ -67,80 +67,88 @@ And install `terraform-docs` with `go get github.com/segmentio/terraform-docs` o ## Contributing -Report issues/questions/feature requests on in the [issues](https://github.com/AaronForce1/terraform-aws-infrastructure-eks/issues/new) section. +Report issues/questions/feature requests on in the [issues](https://gitlab.com/magnetic-asia/infrastructure-as-code/infrastructure-terraform-eks/issues/new) section. -Full contributing [guidelines are covered here](https://github.com/AaronForce1/terraform-aws-infrastructure-eks/blob/main/.gitlab/CONTRIBUTING.md). +Full contributing [guidelines are covered here](https://gitlab.com/magnetic-asia/infrastructure-as-code/infrastructure-terraform-eks/blob/master/.github/CONTRIBUTING.md). 
## Change log -- The [changelog](https://github.com/AaronForce1/terraform-aws-infrastructure-eks/blob/main/CHANGELOG.md) captures all important release notes from 2.0.2 +- The [changelog](https://gitlab.com/magnetic-asia/infrastructure-as-code/infrastructure-terraform-eks/tree/master/CHANGELOG.md) captures all important release notes from 1.1.17 ## Authors -Created by [Aaron Baideme](https://github.com/aaronforce1) - aaron.baideme@advancedtechnologies.com.hk +Created by [Aaron Baideme](https://gitlab.com/aaronforce1) - aaron.baideme@magneticasia.com Supported by Ronel Cartas - ronel.cartas@magneticasia.com -Supported by Diederik Damen - diederik.damen@magneticasia.com ## License -MIT Licensed. See [LICENSE](https://github.com/AaronForce1/terraform-aws-infrastructure-eks/blob/main/LICENSE) for full details. +MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-code/infrastructure-terraform-eks/tree/master/LICENSE) for full details. ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.14.5 | -| [aws](#requirement\_aws) | ~> 3.58 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 4.5 | | [gitlab](#requirement\_gitlab) | ~> 3.4 | | [helm](#requirement\_helm) | ~> 2.0 | -| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [kubectl](#requirement\_kubectl) | ~> 1.14.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.11.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | 3.74.0 | -| [local](#provider\_local) | 2.1.0 | -| [random](#provider\_random) | 3.1.0 | +| [aws](#provider\_aws) | ~> 4.5 | +| [aws.secondary](#provider\_aws.secondary) | ~> 4.5 | +| [kubectl](#provider\_kubectl) | ~> 1.14.0 | +| [kubernetes](#provider\_kubernetes) | ~> 2.11.0 | +| [local](#provider\_local) | n/a | +| [random](#provider\_random) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| 
[aws-cluster-autoscaler](#module\_aws-cluster-autoscaler) | ./provisioning/kubernetes/cluster-autoscaler | n/a | -| [aws-support](#module\_aws-support) | ./provisioning/kubernetes/aws-support | n/a | +| [argocd](#module\_argocd) | ./provisioning/kubernetes/argocd | n/a | +| [aws-cluster-autoscaler](#module\_aws-cluster-autoscaler) | ./aws-cluster-autoscaler | n/a | +| [aws-support](#module\_aws-support) | ./aws-support | n/a | | [certmanager](#module\_certmanager) | ./provisioning/kubernetes/certmanager | n/a | | [consul](#module\_consul) | ./provisioning/kubernetes/hashicorp-consul | n/a | -| [eks](#module\_eks) | terraform-aws-modules/eks/aws | ~> 17.15.0 | -| [eks-vpc](#module\_eks-vpc) | terraform-aws-modules/vpc/aws | ~> 3.1 | -| [eks-vpc-endpoints](#module\_eks-vpc-endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | ~> 3.1 | +| [eks](#module\_eks) | terraform-aws-modules/eks/aws | ~> 18.23.0 | +| [eks-vpc](#module\_eks-vpc) | terraform-aws-modules/vpc/aws | ~> 3.14 | +| [eks-vpc-endpoints](#module\_eks-vpc-endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | ~> 3.14 | +| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | ~> 18.23.0 | | [elastic-stack](#module\_elastic-stack) | ./provisioning/kubernetes/elastic-stack | n/a | -| [gitlab-k8s-agent](#module\_gitlab-k8s-agent) | ./provisioning/kubernetes/gitlab-kubernetes-agent | n/a | | [grafana](#module\_grafana) | ./provisioning/kubernetes/grafana | n/a | | [kubernetes-dashboard](#module\_kubernetes-dashboard) | ./provisioning/kubernetes/kubernetes-dashboard | n/a | -| [metrics-server](#module\_metrics-server) | ./provisioning/kubernetes/metrics-server | n/a | -| [namespaces](#module\_namespaces) | ./provisioning/kubernetes/namespaces | n/a | | [nginx-controller-ingress](#module\_nginx-controller-ingress) | ./provisioning/kubernetes/nginx-controller | n/a | -| 
[stakater-reloader](#module\_stakater-reloader) | ./provisioning/kubernetes/stakater-reloader | n/a | | [subnet\_addrs](#module\_subnet\_addrs) | hashicorp/subnets/cidr | 1.0.0 | | [vault](#module\_vault) | ./provisioning/kubernetes/hashicorp-vault | n/a | -| [vault-operator](#module\_vault-operator) | ./provisioning/kubernetes/bonzai-vault-operator | n/a | -| [vault-secrets-webhook](#module\_vault-secrets-webhook) | ./provisioning/kubernetes/bonzai-vault-secrets-webhook | n/a | ## Resources | Name | Type | |------|------| -| [aws_eks_node_group.custom_node_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource | +| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_kms_alias.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_alias) | resource | | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_kms_replica_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_replica_key) | resource | +| [aws_route53_zone.hosted_zone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_zone) | resource | | [aws_vpc_endpoint.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) | resource | +| [kubectl_manifest.aws-auth](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | +| [kubernetes_namespace.cluster](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| 
[kubernetes_secret.regcred](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | | [random_integer.cidr_vpc](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/integer) | resource | -| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_availability_zones.available_azs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_eks_cluster.my-cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.my-auth](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_ssm_parameter.regcred_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.regcred_username](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | | [local_file.infrastructure-terraform-eks-version](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs @@ -149,51 +157,57 @@ MIT Licensed. 
See [LICENSE](https://github.com/AaronForce1/terraform-aws-infrast |------|-------------|------|---------|:--------:| | [app\_name](#input\_app\_name) | Application Name | `string` | `"eks"` | no | | [app\_namespace](#input\_app\_namespace) | Tagged App Namespace | `any` | n/a | yes | -| [aws\_autoscaler\_cordon\_node\_before\_term](#input\_aws\_autoscaler\_cordon\_node\_before\_term) | AWS Autoscaling, cordon\_node\_before\_term (AWS defaults to false, but setting it to true migth give a more friendly removal process) | `string` | `"true"` | no | -| [aws\_autoscaler\_scale\_down\_util\_threshold](#input\_aws\_autoscaler\_scale\_down\_util\_threshold) | AWS Autoscaling, scale\_down\_util\_threshold (AWS defaults to 0.5, but raising that to 0.7 to be a tad more aggressive with scaling back) | `number` | `0.7` | no | -| [aws\_autoscaler\_skip\_nodes\_with\_local\_storage](#input\_aws\_autoscaler\_skip\_nodes\_with\_local\_storage) | AWS Autoscaling, skip\_nodes\_with\_local\_storage (AWS defaults to true, also modifying to false for more scaling back) | `string` | `"false"` | no | -| [aws\_autoscaler\_skip\_nodes\_with\_system\_pods](#input\_aws\_autoscaler\_skip\_nodes\_with\_system\_pods) | AWS Autoscaling, skip\_nodes\_with\_system\_pods (AWS defaults to true, but here default to false, again to be a little bit more aggressive with scaling back) | `string` | `"false"` | no | -| [aws\_region](#input\_aws\_region) | Region for the VPC | `any` | n/a | yes | -| [billingcustomer](#input\_billingcustomer) | Which BILLINGCUSTOMER is setup in AWS | `any` | n/a | yes | +| [autoscaling\_configuration](#input\_autoscaling\_configuration) | n/a |
object({
scale_down_util_threshold = number
skip_nodes_with_local_storage = bool
skip_nodes_with_system_pods = bool
cordon_node_before_term = bool
})
|
{
"cordon_node_before_term": true,
"scale_down_util_threshold": 0.7,
"skip_nodes_with_local_storage": false,
"skip_nodes_with_system_pods": false
}
| no | +| [aws\_installations](#input\_aws\_installations) | AWS Support Components including Cluster Autoscaler, EBS/EFS Storage Classes, etc. |
object({
storage_ebs = optional(object({
eks_irsa_role = bool
gp2 = bool
gp3 = bool
st1 = bool
}))
storage_efs = optional(object({
eks_irsa_role = bool
eks_security_groups = bool
efs = bool
}))
cluster_autoscaler = optional(bool)
route53_external_dns = optional(bool)
kms_secrets_access = optional(bool)
cert_manager = optional(bool)
})
|
{
"cert_manager": true,
"cluster_autoscaler": true,
"kms_secrets_access": true,
"route53_external_dns": true,
"storage_ebs": {
"eks_irsa_role": true,
"gp2": true,
"gp3": true,
"st1": true
},
"storage_efs": {
"efs": true,
"eks_irsa_role": true,
"eks_security_groups": true
}
}
| no | +| [aws\_profile](#input\_aws\_profile) | AWS Profile | `string` | `""` | no | +| [aws\_region](#input\_aws\_region) | AWS Region for all primary configurations | `any` | n/a | yes | +| [aws\_secondary\_region](#input\_aws\_secondary\_region) | Secondary Region for certain redundant AWS components | `any` | n/a | yes | +| [billingcustomer](#input\_billingcustomer) | Which Billingcustomer, aka Cost Center, is responsible for this infra provisioning | `any` | n/a | yes | +| [cluster\_addons](#input\_cluster\_addons) | An add-on is software that provides supporting operational capabilities to Kubernetes applications, but is not specific to the application: coredns, kube-proxy, vpc-cni | `any` |
{
"coredns": {
"resolve_conflicts": "OVERWRITE"
},
"kube-proxy": {},
"vpc-cni": {
"resolve_conflicts": "OVERWRITE"
}
}
| no | | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | If the cluster endpoint is to be exposed to the public internet, specify CIDRs here that it should be restricted to | `list(string)` | `[]` | no | +| [cluster\_name](#input\_cluster\_name) | Optional override for cluster name instead of standard {name}-{namespace}-{env} | `string` | `""` | no | +| [cluster\_root\_domain](#input\_cluster\_root\_domain) | Domain root where all kubernetes systems are orchestrating control |
object({
create = optional(bool)
name = string
ingress_records = optional(list(string))
})
| n/a | yes | | [cluster\_version](#input\_cluster\_version) | Kubernetes Cluster Version | `string` | `"1.21"` | no | | [create\_launch\_template](#input\_create\_launch\_template) | enable launch template on node group | `bool` | `false` | no | +| [custom\_aws\_s3\_support\_infra](#input\_custom\_aws\_s3\_support\_infra) | Adding the ability to provision additional support infrastructure required for certain EKS Helm chart/App-of-App Components |
list(object({
name = string
bucket_acl = string
aws_kms_key_id = optional(string)
lifecycle_rules = any
versioning = bool
k8s_namespace_service_account_access = any
}))
| `[]` | no | +| [custom\_namespaces](#input\_custom\_namespaces) | Adding namespaces to a default cluster provisioning process | `list(string)` | `[]` | no | | [default\_ami\_type](#input\_default\_ami\_type) | Default AMI used for node provisioning | `string` | `"AL2_x86_64"` | no | -| [enable\_aws\_vault\_unseal](#input\_enable\_aws\_vault\_unseal) | If Vault is enabled and deployed, by default, the unseal process is manual; Changing this to true allows for automatic unseal using AWS KMS | `bool` | `false` | no | -| [extra\_tags](#input\_extra\_tags) | n/a | `map(any)` | `{}` | no | -| [gitlab\_kubernetes\_agent\_config](#input\_gitlab\_kubernetes\_agent\_config) | Configuration for Gitlab Kubernetes Agent |
object({
gitlab_agent_url = string
gitlab_agent_secret = string
})
|
{
"gitlab_agent_secret": "",
"gitlab_agent_url": ""
}
| no | +| [default\_capacity\_type](#input\_default\_capacity\_type) | Default capacity configuration used for node provisioning. Valid values: `ON_DEMAND, SPOT` | `string` | `"ON_DEMAND"` | no | +| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Override default 'single nodegroup, on a private subnet' with more advanced configuration archetypes | `any` | `[]` | no | +| [elastic\_ip\_custom\_configuration](#input\_elastic\_ip\_custom\_configuration) | By default, this module will provision new Elastic IPs for the VPC's NAT Gateways; however, one can also override and specify separate, pre-existing elastic IPs as needed in order to preserve IPs that are whitelisted; reminder that the list of EIPs should have the same count as nat gateways created. |
object({
enabled = bool
reuse_nat_ips = optional(bool)
external_nat_ip_ids = optional(list(string))
})
|
{
"enabled": false,
"external_nat_ip_ids": [],
"reuse_nat_ips": false
}
| no | | [google\_authDomain](#input\_google\_authDomain) | Used for Infrastructure OAuth: Google Auth Domain | `any` | n/a | yes | | [google\_clientID](#input\_google\_clientID) | Used for Infrastructure OAuth: Google Auth Client ID | `any` | n/a | yes | | [google\_clientSecret](#input\_google\_clientSecret) | Used for Infrastructure OAuth: Google Auth Client Secret | `any` | n/a | yes | -| [helm\_installations](#input\_helm\_installations) | n/a |
object({
gitlab_runner = bool
gitlab_k8s_agent = bool
vault_consul = bool
ingress = bool
elasticstack = bool
grafana = bool
stakater_reloader = bool
metrics_server = bool
})
|
{
"elasticstack": false,
"gitlab_k8s_agent": false,
"gitlab_runner": false,
"grafana": true,
"ingress": true,
"metrics_server": true,
"stakater_reloader": true,
"vault_consul": true
}
| no | -| [instance\_desired\_size](#input\_instance\_desired\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `8` | no | -| [instance\_max\_size](#input\_instance\_max\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `12` | no | -| [instance\_min\_size](#input\_instance\_min\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `2` | no | -| [instance\_type](#input\_instance\_type) | AWS Instance Type for provisioning | `string` | `"c5a.large"` | no | -| [ipv6](#input\_ipv6) | n/a |
object({
enable = bool
assign_ipv6_address_on_creation = bool
private_subnet_assign_ipv6_address_on_creation = bool
public_subnet_assign_ipv6_address_on_creation = bool
})
|
{
"assign_ipv6_address_on_creation": true,
"enable": false,
"private_subnet_assign_ipv6_address_on_creation": true,
"public_subnet_assign_ipv6_address_on_creation": true
}
| no | -| [letsencrypt\_email](#input\_letsencrypt\_email) | email used for the clusterissuer email definition (spec.acme.email) | `any` | n/a | yes | -| [managed\_node\_groups](#input\_managed\_node\_groups) | Override default 'single nodegroup, on a private subnet' with more advaned configuration archetypes |
list(object({
name = string
desired_capacity = number
max_capacity = number
min_capacity = number
instance_type = string
ami_type = optional(string)
key_name = string
public_ip = bool
create_launch_template = bool
disk_size = number
taints = list(object({
key = string
value = string
effect = string
affinity_label = bool
}))
subnet_selections = object({
public = bool
private = bool
})
}))
| n/a | yes | +| [helm\_configurations](#input\_helm\_configurations) | n/a |
object({
dashboard = optional(string)
gitlab_runner = optional(string)
vault_consul = optional(object({
consul_values = optional(string)
vault_values = optional(string)
enable_aws_vault_unseal = optional(bool) # If Vault is enabled and deployed, by default, the unseal process is manual; changing this to true allows for automatic unseal using AWS KMS
vault_nodeselector = optional(string) # Allow for vault node selectors without extensive reconfiguration of the standard values file
vault_tolerations = optional(string) # Allow for tolerating certain taint on nodes, example usage, string:'NoExecute:we_love_hashicorp:true'
}))
ingress = optional(object({
nginx_values = optional(string)
certmanager_values = optional(string)
}))
elasticstack = optional(string)
grafana = optional(string)
argocd = optional(object({
value_file = optional(string)
application_set = optional(list(string))
repository_secrets = optional(list(object({
name = string
url = string
type = string
username = string
password = string
secrets_store = string
})))
credential_templates = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
})))
registry_secrets = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
auth = string
email = string
})))
generate_plugin_repository_secret = optional(bool)
}))
})
|
{
"argocd": null,
"dashboard": null,
"elasticstack": null,
"gitlab_runner": null,
"grafana": null,
"ingress": null,
"vault_consul": null
}
| no | +| [helm\_installations](#input\_helm\_installations) | n/a |
object({
dashboard = bool
gitlab_runner = bool
vault_consul = bool
ingress = bool
elasticstack = bool
grafana = bool
argocd = bool
})
|
{
"argocd": false,
"dashboard": true,
"elasticstack": false,
"gitlab_runner": false,
"grafana": true,
"ingress": true,
"vault_consul": true
}
| no | +| [instance\_desired\_size](#input\_instance\_desired\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `2` | no | +| [instance\_max\_size](#input\_instance\_max\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `4` | no | +| [instance\_min\_size](#input\_instance\_min\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `1` | no | +| [instance\_type](#input\_instance\_type) | AWS Instance Type for provisioning | `string` | `"c5a.medium"` | no | | [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no | | [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | | [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | | [nat\_gateway\_custom\_configuration](#input\_nat\_gateway\_custom\_configuration) | Override the default NAT Gateway configuration, which configures a single NAT gateway for non-prod, while one per AZ on tfenv=prod |
object({
enabled = bool
enable_nat_gateway = bool
enable_dns_hostnames = bool
single_nat_gateway = bool
one_nat_gateway_per_az = bool
enable_vpn_gateway = bool
propagate_public_route_tables_vgw = bool
})
|
{
"enable_dns_hostnames": true,
"enable_nat_gateway": true,
"enable_vpn_gateway": false,
"enabled": false,
"one_nat_gateway_per_az": true,
"propagate_public_route_tables_vgw": false,
"single_nat_gateway": false
}
| no | | [node\_key\_name](#input\_node\_key\_name) | EKS Node Key Name | `string` | `""` | no | | [node\_public\_ip](#input\_node\_public\_ip) | assign public ip on the nodes | `bool` | `false` | no | -| [root\_domain\_name](#input\_root\_domain\_name) | Domain root where all kubernetes systems are orchestrating control | `any` | n/a | yes | +| [operator\_domain\_name](#input\_operator\_domain\_name) | Domain root of operator cluster | `string` | `""` | no | +| [registry\_credentials](#input\_registry\_credentials) | Create list of registry credential for different namespaces, username and password are fetched from AWS parameter store |
list(object({
name = string
namespace = string
docker_username = string
docker_password = string
docker_server = string
docker_email = string
secrets_store = string
}))
| `[]` | no | | [root\_vol\_size](#input\_root\_vol\_size) | Root Volume Size | `string` | `"50"` | no | +| [tech\_email](#input\_tech\_email) | Tech Contact E-Mail for services such as LetsEncrypt | `any` | n/a | yes | | [tfenv](#input\_tfenv) | Environment | `any` | n/a | yes | -| [vault\_nodeselector](#input\_vault\_nodeselector) | for placing node/consul on specific nodes, example usage, string:'eks.amazonaws.com/nodegroup: vaultconsul\_group' | `string` | `""` | no | -| [vault\_tolerations](#input\_vault\_tolerations) | for tolerating certain taint on nodes, example usage, string:'NoExecute:we\_love\_hashicorp:true' | `string` | `""` | no | -| [vpc\_flow\_logs](#input\_vpc\_flow\_logs) | Manually enable or disable VPC flow logs; Please note, for production, these are enabled by default otherwise they will be disabled; setting a value for this object will override all defaults regardless of environment | `map` | `{}` | no | -| [vpc\_subnet\_configuration](#input\_vpc\_subnet\_configuration) | Configure VPC CIDR and relative subnet intervals for generating a VPC. If not specified, default values will be generated. |
object({
base_cidr = string
subnet_bit_interval = number
autogenerate = optional(bool)
})
|
{
"autogenerate": true,
"base_cidr": "172.%s.0.0/16",
"subnet_bit_interval": 4
}
| no | +| [vpc\_flow\_logs](#input\_vpc\_flow\_logs) | Manually enable or disable VPC flow logs; Please note, for production, these are enabled by default otherwise they will be disabled; setting a value for this object will override all defaults regardless of environment |
object({
enabled = optional(bool)
})
| `{}` | no | +| [vpc\_subnet\_configuration](#input\_vpc\_subnet\_configuration) | Configure VPC CIDR and relative subnet intervals for generating a VPC. If not specified, default values will be generated. |
object({
base_cidr = string
subnet_bit_interval = object({
public = number
private = number
})
autogenerate = optional(bool)
})
|
{
"autogenerate": true,
"base_cidr": "172.%s.0.0/16",
"subnet_bit_interval": {
"private": 6,
"public": 2
}
}
| no | ## Outputs | Name | Description | |------|-------------| +| [aws\_profile](#output\_aws\_profile) | n/a | +| [aws\_region](#output\_aws\_region) | # ----------- ## Region and AWS Profile Checks # ----------- | | [base\_cidr\_block](#output\_base\_cidr\_block) | n/a | -| [kubecfg](#output\_kubecfg) | n/a | -| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | n/a | +| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | n/a | +| [kubernetes-cluster-auth](#output\_kubernetes-cluster-auth) | n/a | +| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | output "kubecfg" { value = module.eks.kubeconfig } | | [kubernetes-cluster-endpoint](#output\_kubernetes-cluster-endpoint) | n/a | | [kubernetes-cluster-id](#output\_kubernetes-cluster-id) | n/a | | [private\_route\_table\_ids](#output\_private\_route\_table\_ids) | n/a | @@ -202,4 +216,4 @@ MIT Licensed. See [LICENSE](https://github.com/AaronForce1/terraform-aws-infrast | [public\_subnet\_ids](#output\_public\_subnet\_ids) | n/a | | [public\_subnets\_cidr\_blocks](#output\_public\_subnets\_cidr\_blocks) | n/a | | [vpc\_id](#output\_vpc\_id) | # ----------- # MODULE: VPC # ----------- | - + \ No newline at end of file diff --git a/VERSION b/VERSION index 50aea0e..56fea8a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1.0 \ No newline at end of file +3.0.0 \ No newline at end of file diff --git a/aws-cluster-autoscaler/.gitignore b/aws-cluster-autoscaler/.gitignore new file mode 100644 index 0000000..5b5fa30 --- /dev/null +++ b/aws-cluster-autoscaler/.gitignore @@ -0,0 +1,2 @@ +.terraform/ +.terraform.lock.hcl \ No newline at end of file diff --git a/provisioning/kubernetes/cluster-autoscaler/README.md b/aws-cluster-autoscaler/README.md similarity index 100% rename from provisioning/kubernetes/cluster-autoscaler/README.md rename to aws-cluster-autoscaler/README.md diff --git 
a/provisioning/kubernetes/cluster-autoscaler/cluster-autoscaler.tf b/aws-cluster-autoscaler/cluster-autoscaler.tf similarity index 97% rename from provisioning/kubernetes/cluster-autoscaler/cluster-autoscaler.tf rename to aws-cluster-autoscaler/cluster-autoscaler.tf index fe98bdd..e20c20a 100644 --- a/provisioning/kubernetes/cluster-autoscaler/cluster-autoscaler.tf +++ b/aws-cluster-autoscaler/cluster-autoscaler.tf @@ -12,7 +12,7 @@ resource "helm_release" "aws-cluster-autoscaler" { "serviceAccount" : { "name" : "aws-cluster-autoscaler-service-account", "annotations" : { - "eks.amazonaws.com/role-arn" : "${module.iam_assumable_role_admin.this_iam_role_arn}" + "eks.amazonaws.com/role-arn" : "${module.iam_assumable_role_admin.iam_role_arn}" } } }, diff --git a/provisioning/kubernetes/cluster-autoscaler/iam-policy.tf b/aws-cluster-autoscaler/iam-policy.tf similarity index 86% rename from provisioning/kubernetes/cluster-autoscaler/iam-policy.tf rename to aws-cluster-autoscaler/iam-policy.tf index 2c6ac28..bfd4671 100644 --- a/provisioning/kubernetes/cluster-autoscaler/iam-policy.tf +++ b/aws-cluster-autoscaler/iam-policy.tf @@ -2,9 +2,10 @@ module "iam_assumable_role_admin" { source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" - version = "3.6.0" + version = "4.24" create_role = true role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-cluster-autoscaler-role" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" provider_url = replace(var.cluster_oidc_issuer_url, "https://", "") role_policy_arns = [aws_iam_policy.cluster_autoscaler_policy.arn] oidc_fully_qualified_subjects = ["system:serviceaccount:${local.k8s_service_account_namespace}:${local.k8s_service_account_name}"] @@ -13,9 +14,10 @@ module "iam_assumable_role_admin" { resource "aws_iam_policy" "cluster_autoscaler_policy" { name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-cluster-autoscaler-policy" - path = "/" - description = "EKS cluster-autoscaler 
policy for cluster ${var.app_name}-${var.app_namespace}-${var.tfenv}" + path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + description = "EKS cluster-autoscaler policy: ${var.app_name}-${var.app_namespace}-${var.tfenv}" policy = data.aws_iam_policy_document.cluster_autoscaler.json + tags = var.tags } data "aws_iam_policy_document" "cluster_autoscaler" { @@ -61,4 +63,4 @@ data "aws_iam_policy_document" "cluster_autoscaler" { values = ["true"] } } -} +} \ No newline at end of file diff --git a/provisioning/kubernetes/cluster-autoscaler/locals.tf b/aws-cluster-autoscaler/locals.tf similarity index 100% rename from provisioning/kubernetes/cluster-autoscaler/locals.tf rename to aws-cluster-autoscaler/locals.tf diff --git a/aws-cluster-autoscaler/variables.tf b/aws-cluster-autoscaler/variables.tf new file mode 100644 index 0000000..cbb1d6e --- /dev/null +++ b/aws-cluster-autoscaler/variables.tf @@ -0,0 +1,28 @@ +variable "app_name" {} +variable "app_namespace" {} +variable "tfenv" {} +variable "cluster_oidc_issuer_url" {} +variable "aws_region" {} + +### AWS Cluster Autoscaling +variable "scale_down_util_threshold" { + description = "AWS Autoscaling, scale_down_util_threshold (AWS defaults to 0.5, but raising that to 0.7 to be a tad more aggressive with scaling back)" + default = 0.7 +} + +variable "skip_nodes_with_local_storage" { + description = "AWS Autoscaling, skip_nodes_with_local_storage (AWS defaults to true, also modifying to false for more scaling back)" + default = "false" +} + +variable "skip_nodes_with_system_pods" { + description = "AWS Autoscaling, skip_nodes_with_system_pods (AWS defaults to true, but here default to false, again to be a little bit more aggressive with scaling back)" + default = "false" +} + +variable "cordon_node_before_term" { + description = "AWS Autoscaling, cordon_node_before_term (AWS defaults to false, but setting it to true migth give a more friendly removal process)" + default = "true" +} + +variable "tags" {} \ No 
newline at end of file diff --git a/aws-support/.gitignore b/aws-support/.gitignore new file mode 100644 index 0000000..5b5fa30 --- /dev/null +++ b/aws-support/.gitignore @@ -0,0 +1,2 @@ +.terraform/ +.terraform.lock.hcl \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/data.tf b/aws-support/data.tf similarity index 100% rename from provisioning/kubernetes/aws-support/data.tf rename to aws-support/data.tf diff --git a/aws-support/kms-secrets-access.tf b/aws-support/kms-secrets-access.tf new file mode 100644 index 0000000..6692132 --- /dev/null +++ b/aws-support/kms-secrets-access.tf @@ -0,0 +1,66 @@ +## IAM Role for external-secrets +data "aws_iam_policy_document" "external_secrets" { + count = var.aws_installations.kms_secrets_access ? 1 : 0 + statement { + effect = "Allow" + actions = [ + "kms:GetPublicKey", + "kms:Decrypt", + "kms:ListKeyPolicies", + "secretsmanager:DescribeSecret", + "kms:ListRetirableGrants", + "ssm:GetParameterHistory", + "kms:GetKeyPolicy", + "kms:ListResourceTags", + "ssm:GetParameters", + "ssm:GetParameter", + "kms:ListGrants", + "secretsmanager:ListSecretVersionIds", + "kms:GetParametersForImport", + "kms:DescribeCustomKeyStores", + "kms:ListKeys", + "secretsmanager:GetSecretValue", + "kms:GetKeyRotationStatus", + "kms:Encrypt", + "ssm:DescribeParameters", + "kms:ListAliases", + "kms:DescribeKey", + "ssm:GetParametersByPath", + "secretsmanager:ListSecrets", + ] + ## TODO: Restrict resources to cluster-associated only. + resources = ["*"] + } +} + +resource "aws_iam_policy" "external_secrets" { + count = var.aws_installations.kms_secrets_access ? 
1 : 0 + + name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-external-secrets-policy" + path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + description = "EKS External Secrets Policy allowing SSM and KMS access: ${var.app_name}-${var.app_namespace}-${var.tfenv}" + policy = data.aws_iam_policy_document.external_secrets[0].json + tags = var.tags +} + + +module "external_secrets_irsa_role" { + count = var.aws_installations.kms_secrets_access ? 1 : 0 + + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "4.24" + + role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-external_secrets" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + + oidc_providers = { + main = { + provider_arn = var.oidc_provider_arn + namespace_service_accounts = ["external-secrets:external-secrets"] + } + } + + # role_policy_arns = [ + # aws_iam_policy.external_secrets[0].arn + # ] +} diff --git a/aws-support/route53.cert-manager.tf b/aws-support/route53.cert-manager.tf new file mode 100644 index 0000000..cceb0b3 --- /dev/null +++ b/aws-support/route53.cert-manager.tf @@ -0,0 +1,48 @@ +data "aws_iam_policy_document" "cert_manager" { + count = try(var.aws_installations.cert_manager, false) && var.aws_installations.route53_external_dns ? 1 : 0 + + statement { + actions = ["route53:GetChange"] + resources = ["arn:aws:route53:::change/*"] + } + + statement { + actions = [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + ] + resources = [var.route53_hosted_zone_arn] + } +} + +resource "aws_iam_policy" "cert_manager" { + count = try(var.aws_installations.cert_manager, false) && var.aws_installations.route53_external_dns ? 
1 : 0 + + name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-cert-manager-policy" + path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + description = "CertManager policy for managing Route53 records: ${var.app_name}-${var.app_namespace}-${var.tfenv}" + policy = data.aws_iam_policy_document.cert_manager[0].json + tags = var.tags +} + +# BUG: https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest#%E2%84%B9%EF%B8%8F-error-invalid-for_each-argument- +# WORKAROUND: terraform apply -target=aws_iam_policy.cert_manager +module "cert_manager_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "4.24" + + count = try(var.aws_installations.cert_manager, false) && var.aws_installations.route53_external_dns ? 1 : 0 + + role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-cert-manager" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + + attach_cert_manager_policy = true + cert_manager_hosted_zone_arns = [var.route53_hosted_zone_arn] + + oidc_providers = { + main = { + provider_arn = var.oidc_provider_arn + namespace_service_accounts = ["cert-manager:cert-manager"] + } + } +} \ No newline at end of file diff --git a/aws-support/route53.external-dns.tf b/aws-support/route53.external-dns.tf new file mode 100644 index 0000000..31d96a3 --- /dev/null +++ b/aws-support/route53.external-dns.tf @@ -0,0 +1,20 @@ +module "external_dns_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "4.24" + + count = var.aws_installations.route53_external_dns ? 
1 : 0 + + role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-external-dns" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + + attach_external_dns_policy = true + external_dns_hosted_zone_arns = [var.route53_hosted_zone_arn] + + oidc_providers = { + main = { + provider_arn = var.oidc_provider_arn + namespace_service_accounts = ["external-dns:external-dns"] + } + } +} + diff --git a/aws-support/s3-infrastructure-buckets.tf b/aws-support/s3-infrastructure-buckets.tf new file mode 100644 index 0000000..5352749 --- /dev/null +++ b/aws-support/s3-infrastructure-buckets.tf @@ -0,0 +1,88 @@ +module "aws_s3_infra_support_buckets" { + source = "terraform-aws-modules/s3-bucket/aws" + version = "~> 3.2" + + count = length(var.eks_infrastructure_support_buckets) + + bucket = "${var.name_prefix}-${var.eks_infrastructure_support_buckets[count.index].name}" + + acl = var.eks_infrastructure_support_buckets[count.index].bucket_acl + force_destroy = var.tfenv == "prod" ? false : true + + block_public_policy = true + block_public_acls = true + ignore_public_acls = true + restrict_public_buckets = true + + server_side_encryption_configuration = { + rule = { + apply_server_side_encryption_by_default = { + kms_master_key_id = var.eks_infrastructure_support_buckets[count.index].aws_kms_key_id + sse_algorithm = "aws:kms" + } + } + } + + versioning = { + status = var.eks_infrastructure_support_buckets[count.index].versioning + } + + lifecycle_rule = var.eks_infrastructure_support_buckets[count.index].lifecycle_rules + + tags = var.tags +} + +## IAM Role for Loki +data "aws_iam_policy_document" "aws_s3_infra_support_bucket_iam_policy_document" { + count = length(var.eks_infrastructure_support_buckets) + statement { + actions = ["s3:*"] + resources = [ + "${module.aws_s3_infra_support_buckets[count.index].s3_bucket_arn}/*", + "${module.aws_s3_infra_support_buckets[count.index].s3_bucket_arn}" + ] + } + + statement { + actions = [ + "kms:GenerateDataKey", + 
"kms:Decrypt" + ] + resources = [ + var.eks_infrastructure_support_buckets[count.index].aws_kms_key_id != null ? var.eks_infrastructure_support_buckets[count.index].aws_kms_key_id : var.eks_infrastructure_kms_arn + ] + } +} + +resource "aws_iam_policy" "aws_s3_infra_support_bucket_iam_policies" { + count = length(var.eks_infrastructure_support_buckets) + + name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-s3-custom-policy-${var.eks_infrastructure_support_buckets[count.index].name}" + path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + description = "EKS S3-custom-policy-${var.eks_infrastructure_support_buckets[count.index].name} policy: ${var.app_name}-${var.app_namespace}-${var.tfenv}" + policy = data.aws_iam_policy_document.aws_s3_infra_support_bucket_iam_policy_document[count.index].json + tags = var.tags +} + +module "aws_s3_infra_support_bucket_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "4.24" + + count = length(var.eks_infrastructure_support_buckets) + + role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-s3-custom-role-${var.eks_infrastructure_support_buckets[count.index].name}" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + + oidc_providers = { + main = { + provider_arn = var.oidc_provider_arn + namespace_service_accounts = "${var.eks_infrastructure_support_buckets[count.index].k8s_namespace_service_account_access}" + } + } + + # role_policy_arns = [ + # aws_iam_policy.aws_s3_infra_support_bucket_iam_policies[count.index].arn + # ] + + tags = var.tags +} diff --git a/provisioning/kubernetes/aws-support/efs-sg.tf b/aws-support/storage-class.efs.security-groups.tf similarity index 52% rename from provisioning/kubernetes/aws-support/efs-sg.tf rename to aws-support/storage-class.efs.security-groups.tf index 792f8b5..1c98860 100644 --- a/provisioning/kubernetes/aws-support/efs-sg.tf +++ b/aws-support/storage-class.efs.security-groups.tf @@ 
-1,4 +1,6 @@ resource "aws_security_group" "efs_security_group" { + count = var.aws_installations.storage_efs.eks_security_groups ? 1 : 0 + name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-efs" description = "${var.app_name}-${var.app_namespace}-${var.tfenv}-efs" vpc_id = var.vpc_id @@ -10,5 +12,12 @@ resource "aws_security_group" "efs_security_group" { cidr_blocks = ["${var.base_cidr_block}"] } - tags = var.tags -} + tags = { + "Environment" = var.tfenv + "Terraform" = "true" + "Namespace" = var.app_namespace + "Billingcustomer" = var.billingcustomer + "Product" = var.app_name + "Name" = "${var.app_name}-${var.app_namespace}-${var.tfenv}-efs" + } +} \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/storage-class.efs.tf b/aws-support/storage-class.efs.tf similarity index 74% rename from provisioning/kubernetes/aws-support/storage-class.efs.tf rename to aws-support/storage-class.efs.tf index 8cf436e..850c857 100644 --- a/provisioning/kubernetes/aws-support/storage-class.efs.tf +++ b/aws-support/storage-class.efs.tf @@ -1,4 +1,6 @@ resource "helm_release" "aws-efs-csi-driver" { + count = var.aws_installations.storage_efs.efs ? 
1 : 0 + name = "aws-efs-csi-driver" repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver" chart = "aws-efs-csi-driver" @@ -22,7 +24,7 @@ resource "helm_release" "aws-efs-csi-driver" { set { name = "controller.serviceAccount.annotations.eks\\.amazonaws\\.com\\/role\\-arn" - value = aws_iam_role.amazoneks-efs-csi-driver-role.arn + value = module.aws_csi_irsa_role[0].iam_role_arn type = "string" } @@ -32,14 +34,11 @@ resource "helm_release" "aws-efs-csi-driver" { type = "string" } - - #set { - # name = "controller.serviceAccount.annotations" - # value = "eks.amazonaws.com/role-arn: arn:aws:iam::${data.aws_caller_identity.aws-support.account_id}:role/${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-EFS_CSI_Driver-role" - #} } resource "kubernetes_storage_class" "efs-storage-class" { + count = var.aws_installations.storage_efs.efs ? 1 : 0 + metadata { name = "efs" } diff --git a/provisioning/kubernetes/aws-support/storage-class.gp2-retain.tf b/aws-support/storage-class.gp2-retain.tf similarity index 78% rename from provisioning/kubernetes/aws-support/storage-class.gp2-retain.tf rename to aws-support/storage-class.gp2-retain.tf index 0f0a76b..0f84051 100644 --- a/provisioning/kubernetes/aws-support/storage-class.gp2-retain.tf +++ b/aws-support/storage-class.gp2-retain.tf @@ -1,4 +1,5 @@ resource "kubernetes_storage_class" "gp2-storage-class" { + count = try(var.aws_installations.storage_ebs.gp2, false) ? 1 : 0 metadata { name = "gp2-retain" } diff --git a/provisioning/kubernetes/aws-support/storage-class.gp3.tf b/aws-support/storage-class.gp3.tf similarity index 83% rename from provisioning/kubernetes/aws-support/storage-class.gp3.tf rename to aws-support/storage-class.gp3.tf index cc4f9a8..512b11e 100644 --- a/provisioning/kubernetes/aws-support/storage-class.gp3.tf +++ b/aws-support/storage-class.gp3.tf @@ -1,4 +1,6 @@ resource "helm_release" "gp3-storage-class" { + count = try(var.aws_installations.storage_ebs.gp3, false) ? 
1 : 0 + name = "aws-ebs-csi-driver" repository = "https://kubernetes-sigs.github.io/aws-ebs-csi-driver" chart = "aws-ebs-csi-driver" @@ -21,11 +23,12 @@ resource "helm_release" "gp3-storage-class" { set { name = "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" - value = aws_iam_role.amazoneks-ebs-csi-driver-role.arn + value = module.aws_csi_irsa_role[0].iam_role_arn } } resource "kubernetes_storage_class" "gp3-storage-class" { + count = try(var.aws_installations.storage_ebs.gp3, false) ? 1 : 0 metadata { name = "gp3" annotations = { diff --git a/aws-support/storage-class.iam-policies.tf b/aws-support/storage-class.iam-policies.tf new file mode 100644 index 0000000..ea2527f --- /dev/null +++ b/aws-support/storage-class.iam-policies.tf @@ -0,0 +1,26 @@ +module "aws_csi_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "4.24" + + count = var.aws_installations.storage_efs.eks_irsa_role || var.aws_installations.storage_ebs.eks_irsa_role ? 1 : 0 + + role_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-CSI_Driver-role" + role_path = "/${var.app_name}/${var.app_namespace}/${var.tfenv}/" + + attach_ebs_csi_policy = var.aws_installations.storage_ebs.eks_irsa_role + attach_efs_csi_policy = var.aws_installations.storage_efs.eks_irsa_role + + oidc_providers = { + main = { + provider_arn = var.oidc_provider_arn + namespace_service_accounts = local.namespace_service_accounts + } + } +} + +locals { + namespace_service_accounts = concat( + var.aws_installations.storage_efs.eks_irsa_role ? ["kube-system:efs-csi-controller-sa"] : [], + var.aws_installations.storage_ebs.eks_irsa_role ? 
["kube-system:ebs-csi-controller-sa"] : [] + ) +} \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/storage-class.st1.tf b/aws-support/storage-class.st1.tf similarity index 75% rename from provisioning/kubernetes/aws-support/storage-class.st1.tf rename to aws-support/storage-class.st1.tf index 8ae9f76..93690bf 100644 --- a/provisioning/kubernetes/aws-support/storage-class.st1.tf +++ b/aws-support/storage-class.st1.tf @@ -1,4 +1,5 @@ resource "kubernetes_storage_class" "st1-storage-class" { + count = try(var.aws_installations.storage_ebs.st1, false) ? 1 : 0 metadata { name = "st1" } diff --git a/provisioning/kubernetes/aws-support/variables.tf b/aws-support/variables.tf similarity index 55% rename from provisioning/kubernetes/aws-support/variables.tf rename to aws-support/variables.tf index 07285d9..de729b9 100644 --- a/provisioning/kubernetes/aws-support/variables.tf +++ b/aws-support/variables.tf @@ -9,4 +9,12 @@ variable "tfenv" {} variable "base_cidr_block" {} variable "billingcustomer" {} variable "node_count" {} +variable "name_prefix" {} +variable "aws_installations" {} +variable "eks_infrastructure_support_buckets" {} +variable "eks_infrastructure_kms_arn" {} +variable "oidc_provider_arn" {} variable "tags" {} +variable "route53_hosted_zone_arn" { + default = "" +} \ No newline at end of file diff --git a/cluster-aws-auth-cm.tf b/cluster-aws-auth-cm.tf new file mode 100644 index 0000000..92ca7fc --- /dev/null +++ b/cluster-aws-auth-cm.tf @@ -0,0 +1,15 @@ +resource "kubectl_manifest" "aws-auth" { + yaml_body = yamlencode({ + "apiVersion" = "v1" + "kind" = "ConfigMap" + "metadata" = { + "name" = "aws-auth" + "namespace" = "kube-system" + } + "data" = { + "mapUsers" = yamlencode(var.map_users) + "mapAccounts" = yamlencode(var.map_accounts) + "mapRoles" = yamlencode(local.aws_auth_roles) + } + }) +} \ No newline at end of file diff --git a/cluster-aws.tf b/cluster-aws.tf new file mode 100644 index 0000000..3b8dcf7 --- /dev/null +++ 
b/cluster-aws.tf @@ -0,0 +1,42 @@ +module "aws-support" { + source = "./aws-support" + depends_on = [module.eks] + + vpc_id = module.eks-vpc.vpc_id + cidr_blocks = module.eks-vpc.private_subnets_cidr_blocks + oidc_url = module.eks.cluster_oidc_issuer_url + account_id = data.aws_caller_identity.current.account_id + aws_region = var.aws_region + app_name = var.app_name + app_namespace = var.app_namespace + tfenv = var.tfenv + name_prefix = local.name_prefix + aws_installations = var.aws_installations + eks_infrastructure_support_buckets = var.custom_aws_s3_support_infra + eks_infrastructure_kms_arn = aws_kms_key.eks.arn + oidc_provider_arn = module.eks.oidc_provider_arn + base_cidr_block = module.subnet_addrs.base_cidr_block + billingcustomer = var.billingcustomer + node_count = var.instance_min_size # var.eks_managed_node_groups != null ? var.eks_managed_node_groups[keys(var.eks_managed_node_groups)[0]].min_capacity : var.instance_min_size + tags = local.base_tags + route53_hosted_zone_arn = try(aws_route53_zone.hosted_zone[0].arn, "") +} + +module "aws-cluster-autoscaler" { + source = "./aws-cluster-autoscaler" + depends_on = [module.eks] + + count = try(var.aws_installations.cluster_autoscaler ? 
1 : 0, 1) + + app_name = var.app_name + app_namespace = var.app_namespace + tfenv = var.tfenv + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + aws_region = var.aws_region + tags = local.base_tags + + scale_down_util_threshold = var.autoscaling_configuration.scale_down_util_threshold + skip_nodes_with_local_storage = var.autoscaling_configuration.skip_nodes_with_local_storage + skip_nodes_with_system_pods = var.autoscaling_configuration.skip_nodes_with_system_pods + cordon_node_before_term = var.autoscaling_configuration.cordon_node_before_term +} \ No newline at end of file diff --git a/cluster-nodegroups.tf b/cluster-nodegroups.tf new file mode 100644 index 0000000..9493654 --- /dev/null +++ b/cluster-nodegroups.tf @@ -0,0 +1,70 @@ +module "eks_managed_node_group" { + source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group" + version = "~> 18.23.0" + + count = length(var.eks_managed_node_groups) + + name = var.eks_managed_node_groups[count.index].name + use_name_prefix = false + cluster_name = module.eks.cluster_id + cluster_version = var.cluster_version + + create_iam_role = true + iam_role_name = "${module.eks.cluster_id}-${var.eks_managed_node_groups[count.index].name}" + iam_role_attach_cni_policy = true + iam_role_use_name_prefix = false + + launch_template_name = "${module.eks.cluster_id}-${var.eks_managed_node_groups[count.index].name}" + launch_template_use_name_prefix = false + # iam_role_arn = module.eks.eks_managed_node_groups.iam_role.arn + + # cluster_ip_family = "ipv6" # NOT READY + vpc_id = module.eks-vpc.vpc_id + subnet_ids = concat( + var.eks_managed_node_groups[count.index].subnet_selections.public ? module.eks-vpc.public_subnets : [], + var.eks_managed_node_groups[count.index].subnet_selections.private ? 
module.eks-vpc.private_subnets : [] + ) + cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id + # vpc_security_group_ids = [module.eks.node_security_group_id] + create_security_group = false + + desired_size = var.eks_managed_node_groups[count.index].desired_capacity + max_size = var.eks_managed_node_groups[count.index].max_capacity + min_size = var.eks_managed_node_groups[count.index].min_capacity + + instance_types = var.eks_managed_node_groups[count.index].instance_types + ami_type = var.eks_managed_node_groups[count.index].ami_type != null ? var.eks_managed_node_groups[count.index].ami_type : var.default_ami_type + capacity_type = var.eks_managed_node_groups[count.index].capacity_type != null ? var.eks_managed_node_groups[count.index].capacity_type : var.default_capacity_type + + disk_size = var.eks_managed_node_groups[count.index].disk_size + ebs_optimized = true + + labels = merge( + { Environment = var.tfenv }, + zipmap( + [ + for x in var.eks_managed_node_groups[count.index].taints : x.key + if x.affinity_label + ], + [ + for x in var.eks_managed_node_groups[count.index].taints : x.value + if x.affinity_label + ] + ) + ) + + taints = { + for taint in var.eks_managed_node_groups[count.index].taints : taint.key => { + key = taint.key + value = taint.value + effect = taint.effect + affinity_label = taint.affinity_label + } + } + + tags = merge( + local.kubernetes_tags, + { "Name" : var.eks_managed_node_groups[count.index].name } + # var.eks_managed_node_groups[count.index][count.index].tags != null ? 
var.eks_managed_node_groups[count.index][count.index].tags : [] + ) +} \ No newline at end of file diff --git a/cluster.tf b/cluster.tf index 71fe98f..43bc7e3 100644 --- a/cluster.tf +++ b/cluster.tf @@ -1,58 +1,122 @@ module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "~> 17.15.0" - depends_on = [module.eks-vpc] - - cluster_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}" - # https://docs.gitlab.com/ee/user/project/clusters/#supported-cluster-versions - cluster_version = var.cluster_version - subnets = concat(module.eks-vpc.public_subnets, module.eks-vpc.private_subnets) - write_kubeconfig = "true" - kubeconfig_output_path = "./.kubeconfig.${var.app_name}_${var.app_namespace}_${var.tfenv}" - tags = local.tags - vpc_id = module.eks-vpc.vpc_id - - cluster_endpoint_private_access = true - cluster_endpoint_private_access_cidrs = module.eks-vpc.private_subnets_cidr_blocks - cluster_endpoint_public_access = length(var.cluster_endpoint_public_access_cidrs) > 0 ? true : false - cluster_endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidrs + source = "terraform-aws-modules/eks/aws" + version = "~> 18.23.0" + + cluster_name = local.name_prefix + cluster_version = var.cluster_version + + vpc_id = module.eks-vpc.vpc_id + subnet_ids = concat(module.eks-vpc.public_subnets, module.eks-vpc.private_subnets) + + cluster_endpoint_private_access = true + # cluster_endpoint_private_access_cidrs = module.eks-vpc.private_subnets_cidr_blocks + cluster_endpoint_public_access = length(var.cluster_endpoint_public_access_cidrs) > 0 ? true : false + cluster_endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidrs + + # IPV6 + # cluster_ip_family = "ipv6" # NOT READY YET + + # We are using the IRSA created below for permissions + # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) + # and then turn this off after the cluster/node group is created. 
Without this initial policy, + # the VPC CNI fails to assign IPs and nodes cannot join the cluster + # See https://github.com/aws/containers-roadmap/issues/1666 for more context + # TODO - remove this policy once AWS releases a managed version similar to AmazonEKS_CNI_Policy (IPv4) + # create_cni_ipv6_iam_policy = true + + cluster_addons = var.cluster_addons + + cluster_encryption_config = [{ + provider_key_arn = aws_kms_key.eks.arn + resources = ["secrets"] + }] + + # EKS Managed Node Group(s) + eks_managed_node_group_defaults = { + ami_type = var.default_ami_type + + attach_cluster_primary_security_group = true + + # We are using the IRSA created below for permissions + # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) + # and then turn this off after the cluster/node group is created. Without this initial policy, + # the VPC CNI fails to assign IPs and nodes cannot join the cluster + # See https://github.com/aws/containers-roadmap/issues/1666 for more context + iam_role_attach_cni_policy = true + } + + eks_managed_node_groups = length(var.eks_managed_node_groups) > 0 ? 
{} : local.default_node_group cluster_enabled_log_types = ["api", "authenticator", "audit", "scheduler", "controllerManager"] - cluster_encryption_config = [ - { - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - } - ] + enable_irsa = true - node_groups_defaults = { - ami_type = var.default_ami_type - disk_size = var.root_vol_size - } + create_aws_auth_configmap = false + manage_aws_auth_configmap = false - workers_group_defaults = { - instance_type = var.instance_type + # aws_auth_roles = local.default_aws_auth_roles + # aws_auth_users = var.map_users + # aws_auth_accounts = var.map_accounts + + cluster_tags = local.base_tags + tags = { + Environment = var.tfenv + Terraform = "true" + Namespace = var.app_namespace + Billingcustomer = var.billingcustomer + Product = var.app_name + infrastructure-eks-terraform = local.module_version } +} - node_groups = length(var.managed_node_groups) > 0 ? {} : local.default_node_group +resource "aws_iam_policy" "node_additional" { + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = module.eks.eks_managed_node_groups - map_roles = concat(var.map_roles, local.default_aws_auth_roles) - map_users = var.map_users - map_accounts = var.map_accounts + policy_arn = aws_iam_policy.node_additional.arn + role = each.value.iam_role_name } resource "aws_kms_key" "eks" { - enable_key_rotation = true - description = "${var.app_name}-${var.app_namespace}-${var.tfenv} EKS Secret Encryption Key" - tags = local.tags + description = "${local.name_prefix} EKS Encryption Key" + multi_region = "true" + enable_key_rotation = true + deletion_window_in_days = 30 + tags = merge({ + Name = "${local.name_prefix}-key" + }, local.base_tags) } +resource "aws_kms_alias" "eks" { + name = "alias/${local.name_prefix}-kms" + target_key_id = aws_kms_key.eks.key_id +} + 
+resource "aws_kms_replica_key" "eks" { + description = "${local.name_prefix} EKS Replica Key (Multi-Region)" + deletion_window_in_days = 30 + primary_key_arn = aws_kms_key.eks.arn + provider = aws.secondary +} -data "aws_eks_cluster" "my-cluster" { +data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id } -data "aws_eks_cluster_auth" "my-auth" { +data "aws_eks_cluster_auth" "cluster" { name = module.eks.cluster_id } diff --git a/custom_node_groups.tf b/custom_node_groups.tf deleted file mode 100644 index 153d901..0000000 --- a/custom_node_groups.tf +++ /dev/null @@ -1,50 +0,0 @@ -resource "aws_eks_node_group" "custom_node_group" { - count = length(var.managed_node_groups) - - cluster_name = "${var.app_name}-${var.app_namespace}-${var.tfenv}" - node_group_name = var.managed_node_groups[count.index].name - node_role_arn = module.eks.worker_iam_role_arn - subnet_ids = concat( - var.managed_node_groups[count.index].subnet_selections.public ? module.eks-vpc.public_subnets : [], - var.managed_node_groups[count.index].subnet_selections.private ? module.eks-vpc.private_subnets : [] - ) - - scaling_config { - desired_size = var.managed_node_groups[count.index].desired_capacity - max_size = var.managed_node_groups[count.index].max_capacity - min_size = var.managed_node_groups[count.index].min_capacity - } - - disk_size = var.managed_node_groups[count.index].disk_size - instance_types = [var.managed_node_groups[count.index].instance_type] - ami_type = var.managed_node_groups[count.index].ami_type != null ? 
var.managed_node_groups[count.index].ami_type : var.default_ami_type - - labels = merge( - { Environment = var.tfenv }, - zipmap( - [ - for x in var.managed_node_groups[count.index].taints : x.key - if x.affinity_label - ], - [ - for x in var.managed_node_groups[count.index].taints : x.value - if x.affinity_label - ] - ) - ) - tags = merge(local.kubernetes_tags, local.tags) - dynamic "taint" { - for_each = var.managed_node_groups[count.index].taints - content { - key = taint.value["key"] - value = taint.value["value"] - effect = taint.value["effect"] - } - } - - # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling. - # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces. - depends_on = [ - module.eks-vpc, module.eks - ] -} diff --git a/examples/basic/data.tf b/examples/basic/data.tf new file mode 100644 index 0000000..d78fce4 --- /dev/null +++ b/examples/basic/data.tf @@ -0,0 +1 @@ +data "aws_caller_identity" "current" {} \ No newline at end of file diff --git a/examples/basic/main.tf b/examples/basic/main.tf index 92e48bb..30e52cd 100644 --- a/examples/basic/main.tf +++ b/examples/basic/main.tf @@ -1,7 +1,65 @@ module "infrastructure-terraform-eks" { - source = "../.." + source = "../.." 
- aws_region = "ap-southeast-1" - tfenv = "test-basic" - root_domain_name = "basic.example.com" -} \ No newline at end of file + aws_region = "ap-southeast-1" + aws_secondary_region = var.aws_region_secondary + + app_namespace = "testing" + tfenv = "basic" + cluster_version = "1.21" + helm_installations = { + gitlab_runner = false + vault_consul = false + ingress = true + elasticstack = false + grafana = false + argocd = false + dashboard = false + } + billingcustomer = "testing" + cluster_root_domain = { + name = "testing.example.xyz" + create = true + } + + google_clientID = "" + google_clientSecret = "" + google_authDomain = "google.com" + + create_launch_template = true + cluster_endpoint_public_access_cidrs = ["0.0.0.0/0"] + + map_users = [ + { "userarn" : "${data.aws_caller_identity.current.arn}", "username" : "admin", "groups" : ["system:masters"] }, + ] + map_roles = [ + { "rolearn" : "${data.aws_caller_identity.current.arn}", "username" : "admin", "groups" : ["system:masters"] }, + ] + + eks_managed_node_groups = [ + { + name = "test-application" + ami_type = "AL2_ARM_64" + create_launch_template = true + desired_capacity = 6 + max_capacity = 6 + min_capacity = 6 + instance_types = ["m6g.large"] + capacity_type = "ON_DEMAND" + disk_size = 30 + disk_encrypted = true + + taints = [] + + tags = {} + subnet_selections = { + public = false + private = true + } + public_ip = false + key_name = "" + } + ] + + tech_email = var.tech_email +} diff --git a/examples/basic/providers.tf b/examples/basic/providers.tf index 2a5ccf3..95aefd6 100644 --- a/examples/basic/providers.tf +++ b/examples/basic/providers.tf @@ -1,3 +1,29 @@ provider "aws" { - region = var.aws_region + region = var.aws_region +} + +provider "aws" { + alias = "secondary" + region = var.aws_region_secondary +} + +provider "kubernetes" { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + 
cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) +} + +provider "helm" { + kubernetes { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) + } +} + +provider "kubectl" { + load_config_file = false + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) } \ No newline at end of file diff --git a/examples/basic/vars.tf b/examples/basic/vars.tf index 3654b2f..258df56 100644 --- a/examples/basic/vars.tf +++ b/examples/basic/vars.tf @@ -1,8 +1,19 @@ variable "gitlab_token" { description = "Gitlab Token" + default = "" } variable "aws_region" { description = "AWS Region for Provisioning" default = "ap-southeast-1" +} + +variable "aws_region_secondary" { + description = "Secondary AWS Region for Provisioning" + default = "eu-west-1" +} + +variable "tech_email" { + description = "Tech Email for Contact" + default = "tech@example.com" } \ No newline at end of file diff --git a/examples/basic/versions.tf b/examples/basic/versions.tf new file mode 100644 index 0000000..efd2c2c --- /dev/null +++ b/examples/basic/versions.tf @@ -0,0 +1,28 @@ +terraform { + required_version = ">= 1.0" + experiments = [module_variable_optional_attrs] + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.5" + configuration_aliases = [aws.secondary] + } + gitlab = { + source = "gitlabhq/gitlab" + version = "~> 3.4" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.11.0" + } + kubectl = { 
+ source = "gavinbunney/kubectl" + version = "~> 1.14.0" + } + } +} diff --git a/examples/extended-solution-gitlab/data.tf b/examples/extended-solution-gitlab/data.tf new file mode 100644 index 0000000..d78fce4 --- /dev/null +++ b/examples/extended-solution-gitlab/data.tf @@ -0,0 +1 @@ +data "aws_caller_identity" "current" {} \ No newline at end of file diff --git a/examples/extended-solution-gitlab/gitlab-management/gitlab-admin.tf b/examples/extended-solution-gitlab/gitlab-management/gitlab-admin.tf deleted file mode 100755 index 6b7ef9a..0000000 --- a/examples/extended-solution-gitlab/gitlab-management/gitlab-admin.tf +++ /dev/null @@ -1,79 +0,0 @@ -# GITLAB MANAGED APPS INTEGRATION -resource "kubernetes_service_account" "gitlab-admin" { - metadata { - name = "gitlab-admin" - namespace = "kube-system" - } -} - -resource "kubernetes_secret" "gitlab-admin" { - metadata { - name = "gitlab-admin" - namespace = "kube-system" - annotations = { - "kubernetes.io/service-account.name" = kubernetes_service_account.gitlab-admin.metadata.0.name - } - } - lifecycle { - ignore_changes = [ - data - ] - } - type = "kubernetes.io/service-account-token" -} - -data "kubernetes_secret" "gitlab-admin-token" { - metadata { - name = kubernetes_service_account.gitlab-admin.default_secret_name - namespace = "kube-system" - } -} - -resource "kubernetes_cluster_role_binding" "gitlab-admin" { - metadata { - name = "gitlab-admin" - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = "cluster-admin" - } - subject { - kind = "ServiceAccount" - name = "gitlab-admin" - namespace = "kube-system" - } -} - -# GITLAB K8S ENV INTEGRATION -data "gitlab_group" "gitops_namespace" { - full_path = var.gitlab_namespace -} - -resource "gitlab_group_cluster" "aws_cluster" { - group = data.gitlab_group.gitops_namespace.id - name = var.eks.cluster_id - domain = "${var.tfenv}.${var.root_domain_name}" - environment_scope = var.tfenv == "prod" ? 
"production" : var.cluster_environment_scope - kubernetes_api_url = var.eks.cluster_endpoint - kubernetes_token = data.kubernetes_secret.gitlab-admin-token.data.token - kubernetes_ca_cert = trimspace(base64decode(var.eks.cluster_certificate_authority_data)) - -} - -# Work Around for lack of `management_project_id` in gitlab_group_cluster -locals { - group_cluster_api_url = join("", ["https://gitlab.com/api/v4/", "groups/", gitlab_group_cluster.aws_cluster.group, "/clusters/", split(":", gitlab_group_cluster.aws_cluster.id)[1]]) - curl_cmd = join("", ["curl -s --header \"Private-Token: $GITLAB_TOKEN\" ", - local.group_cluster_api_url, - " -H 'Content-Type:application/json' --request PUT --data '{\"management_project_id\":\"'$CLUSTER_MANAGEMENT_PROJECT_ID'\"}'"]) -} - -resource "null_resource" "gitlab-management-cluster-associate" { - triggers = { cluster_id = gitlab_group_cluster.aws_cluster.id } - - provisioner "local-exec" { - command = local.curl_cmd - } - depends_on = [gitlab_group_cluster.aws_cluster] -} \ No newline at end of file diff --git a/examples/extended-solution-gitlab/gitlab-management/vars.tf b/examples/extended-solution-gitlab/gitlab-management/vars.tf deleted file mode 100644 index 9f887e0..0000000 --- a/examples/extended-solution-gitlab/gitlab-management/vars.tf +++ /dev/null @@ -1,19 +0,0 @@ -variable "gitlab_token" { - description = "Gitlab Token" -} - -variable "gitlab_namespace" { - description = "Gitlab Namespace where K8s can be viewed and monitored at" -} - -variable "eks" { - description = "Pass along the module responses from parent EKS cluster configuration" -} - -variable "cluster_environment_scope" { - description = "Environment Scope that the cluster should cover according to Gitlab CI" -} - -variable "tfenv" {} -variable "app_namespace" {} -variable "root_domain_name" {} \ No newline at end of file diff --git a/examples/extended-solution-gitlab/gitlab-management/versions.tf 
b/examples/extended-solution-gitlab/gitlab-management/versions.tf deleted file mode 100644 index d29ee90..0000000 --- a/examples/extended-solution-gitlab/gitlab-management/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_providers { - gitlab = { - source = "gitlabhq/gitlab" - } - kubernetes = { - source = "hashicorp/kubernetes" - } - null = { - source = "hashicorp/null" - } - } - required_version = ">= 0.13" -} diff --git a/examples/extended-solution-gitlab/main.tf b/examples/extended-solution-gitlab/main.tf index f07faf2..4731f3a 100644 --- a/examples/extended-solution-gitlab/main.tf +++ b/examples/extended-solution-gitlab/main.tf @@ -6,19 +6,6 @@ module "infrastructure-terraform-eks" { root_domain_name = "basic.example.com" } -module "gitlab-management" { - source = "./gitlab-management" - depends_on = [module.infrastructure-terraform-eks] - - gitlab_token = var.gitlab_token - gitlab_namespace = var.gitlab_namespace - app_namespace = var.app_namespace - tfenv = var.tfenv - eks = module.infrastructure-terraform-eks - root_domain_name = var.root_domain_name - cluster_environment_scope = var.cluster_environment_scope -} - module "gitlab_runner" { source = "./gitlab-runner" depends_on = [module.infrastructure-terraform-eks] diff --git a/examples/extended-solution-gitlab/providers.tf b/examples/extended-solution-gitlab/providers.tf index 26843d5..de1c02d 100644 --- a/examples/extended-solution-gitlab/providers.tf +++ b/examples/extended-solution-gitlab/providers.tf @@ -1,13 +1,32 @@ provider "aws" { region = var.aws_region - profile = var.aws_profile +} + +provider "aws" { + alias = "secondary" + region = var.aws_region_secondary +} - assume_role { - role_arn = var.serviceaccount_role - external_id = "infrastructure-eks-terraform" +provider "kubernetes" { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = 
base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) +} + +provider "helm" { + kubernetes { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) } } +provider "kubectl" { + load_config_file = false + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) +} provider "gitlab" { base_url = "https://gitlab.com/api/v4/" token = var.gitlab_token diff --git a/examples/full-solution-k8s/data.tf b/examples/full-solution-k8s/data.tf new file mode 100644 index 0000000..d78fce4 --- /dev/null +++ b/examples/full-solution-k8s/data.tf @@ -0,0 +1 @@ +data "aws_caller_identity" "current" {} \ No newline at end of file diff --git a/examples/full-solution-k8s/main.tf b/examples/full-solution-k8s/main.tf new file mode 100644 index 0000000..15667e8 --- /dev/null +++ b/examples/full-solution-k8s/main.tf @@ -0,0 +1,70 @@ +module "infrastructure-terraform-eks" { + source = "../.." 
+ + aws_region = "ap-southeast-1" + aws_secondary_region = var.aws_region_secondary + + app_namespace = "testing" + tfenv = "basic" + cluster_version = "1.21" + helm_installations = { + gitlab_runner = false + vault_consul = true + ingress = true + elasticstack = true + grafana = true + argocd = false + dashboard = true + } + helm_configurations = { + vault_consul = { + enable_aws_vault_unseal = true + } + } + billingcustomer = "testing" + cluster_root_domain = { + name = "testing.example.xyz" + create = true + } + + google_clientID = "" + google_clientSecret = "" + google_authDomain = "google.com" + + create_launch_template = true + cluster_endpoint_public_access_cidrs = ["0.0.0.0/0"] + + map_users = [ + { "userarn" : "${data.aws_caller_identity.current.arn}", "username" : "admin", "groups" : ["system:masters"] }, + ] + map_roles = [ + { "rolearn" : "${data.aws_caller_identity.current.arn}", "username" : "admin", "groups" : ["system:masters"] }, + ] + + eks_managed_node_groups = [ + { + name = "test-application" + ami_type = "AL2_ARM_64" + create_launch_template = true + desired_capacity = 6 + max_capacity = 6 + min_capacity = 6 + instance_types = ["m6g.large"] + capacity_type = "ON_DEMAND" + disk_size = 30 + disk_encrypted = true + + taints = [] + + tags = {} + subnet_selections = { + public = false + private = true + } + public_ip = false + key_name = "" + } + ] + + tech_email = var.tech_email +} \ No newline at end of file diff --git a/examples/full-solution-k8s/providers.tf b/examples/full-solution-k8s/providers.tf new file mode 100644 index 0000000..de1c02d --- /dev/null +++ b/examples/full-solution-k8s/providers.tf @@ -0,0 +1,33 @@ +provider "aws" { + region = var.aws_region +} + +provider "aws" { + alias = "secondary" + region = var.aws_region_secondary +} + +provider "kubernetes" { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = 
base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) +} + +provider "helm" { + kubernetes { + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) + } +} + +provider "kubectl" { + load_config_file = false + host = module.infrastructure-terraform-eks.kubernetes-cluster-endpoint + token = module.infrastructure-terraform-eks.kubernetes-cluster-auth.token + cluster_ca_certificate = base64decode(module.infrastructure-terraform-eks.kubernetes-cluster-certificate-authority-data) +} +provider "gitlab" { + base_url = "https://gitlab.com/api/v4/" + token = var.gitlab_token +} \ No newline at end of file diff --git a/examples/full-solution-k8s/vars.tf b/examples/full-solution-k8s/vars.tf new file mode 100644 index 0000000..258df56 --- /dev/null +++ b/examples/full-solution-k8s/vars.tf @@ -0,0 +1,19 @@ +variable "gitlab_token" { + description = "Gitlab Token" + default = "" +} + +variable "aws_region" { + description = "AWS Region for Provisioning" + default = "ap-southeast-1" +} + +variable "aws_region_secondary" { + description = "Secondary AWS Region for Provisioning" + default = "eu-west-1" +} + +variable "tech_email" { + description = "Tech Email for Contact" + default = "tech@example.com" +} \ No newline at end of file diff --git a/examples/optional-components-k8s/datadog-dashboard/datadog-infrastructure.tf b/examples/optional-components-k8s/datadog-dashboard/datadog-infrastructure.tf deleted file mode 100644 index 28242d3..0000000 --- a/examples/optional-components-k8s/datadog-dashboard/datadog-infrastructure.tf +++ /dev/null @@ -1,34 +0,0 @@ -resource "helm_release" "datadog-dashboard" { - name = "datadog-dashboard-${var.app_namespace}-${var.tfenv}" - repository = "https://helm.datadoghq.com" - chart = "datadog" - 
version = "2.8.3" - namespace = "monitoring" - create_namespace = false - - values = [ - local_file.values_yaml.content - ] -} - -resource "local_file" "values_yaml" { - content = yamlencode(local.helmChartValues) - filename = "${path.module}/src/values.overrides.v2.8.3.yaml" -} - -locals { - helmChartValues = { - "datadog" = { - "apiKey" : var.datadog_apikey, - "clusterName" : "eks-${var.app_namespace}-${var.tfenv}", - "site" : "datadoghq.eu", - "logs" : { - "enabled" : true - } - } - } -} - -variable "app_namespace" {} -variable "tfenv" {} -variable "datadog_apikey" {} \ No newline at end of file diff --git a/examples/optional-components-k8s/datadog-dashboard/src/values.v2.8.3.yaml b/examples/optional-components-k8s/datadog-dashboard/src/values.v2.8.3.yaml deleted file mode 100644 index ba7a386..0000000 --- a/examples/optional-components-k8s/datadog-dashboard/src/values.v2.8.3.yaml +++ /dev/null @@ -1,1110 +0,0 @@ -## Default values for Datadog Agent -## See Datadog helm documentation to learn more: -## https://docs.datadoghq.com/agent/kubernetes/helm/ - -# nameOverride -- Override name of app -nameOverride: # "" - -# fullnameOverride -- Override the full qualified app name -fullnameOverride: # "" - -# targetSystem -- Target OS for this deployment (possible values: linux, windows) -targetSystem: "linux" - -# registry -- Registry to use for all Agent images (default gcr.io) -## Currently we offer Datadog Agent images on: -## GCR - use gcr.io/datadoghq (default) -## DockerHub - use docker.io/datadog -## AWS - use public.ecr.aws/datadog -registry: gcr.io/datadoghq - -datadog: - # datadog.apiKey -- Your Datadog API key - # ref: https://app.datadoghq.com/account/settings#agent/kubernetes - apiKey: - - # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one - ## If set, this parameter takes precedence over "apiKey". 
- apiKeyExistingSecret: # - - # datadog.appKey -- Datadog APP key required to use metricsProvider - ## If you are using clusterAgent.metricsProvider.enabled = true, you must set - ## a Datadog application key for read access to your metrics. - appKey: # - - # datadog.appKeyExistingSecret -- Use existing Secret which stores APP key instead of creating a new one - ## If set, this parameter takes precedence over "appKey". - appKeyExistingSecret: # - - # datadog.securityContext -- Allows you to overwrite the default PodSecurityContext on the Daemonset or Deployment - securityContext: {} - # seLinuxOptions: - # user: "system_u" - # role: "system_r" - # type: "spc_t" - # level: "s0" - - # datadog.hostVolumeMountPropagation -- Allow to specify the `mountPropagation` value on all volumeMounts using HostPath - ## ref: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation - hostVolumeMountPropagation: None - - # datadog.clusterName -- Set a unique cluster name to allow scoping hosts and Cluster Checks easily - ## The name must be unique and must be dot-separated tokens with the following restrictions: - ## * Lowercase letters, numbers, and hyphens only. - ## * Must start with a letter. - ## * Must end with a number or a letter. - ## * Overall length should not be higher than 80 characters. - ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: - ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name - clusterName: # - - # datadog.site -- The site of the Datadog intake to send Agent data to - ## Set to 'datadoghq.eu' to send data to the EU site. - site: # datadoghq.com - - # datadog.dd_url -- The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL - ## Overrides the site setting defined in "site". 
- dd_url: # https://app.datadoghq.com - - # datadog.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, off - logLevel: INFO - - # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment - ## ref: https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics - kubeStateMetricsEnabled: true - - kubeStateMetricsNetworkPolicy: - # datadog.kubeStateMetricsNetworkPolicy.create -- If true, create a NetworkPolicy for kube state metrics - create: false - - ## Manage Cluster checks feature - ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ - ## Autodiscovery via Kube Service annotations is automatically enabled - clusterChecks: - # datadog.clusterChecks.enabled -- Enable the Cluster Checks feature on both the cluster-agents and the daemonset - enabled: true - - # datadog.nodeLabelsAsTags -- Provide a mapping of Kubernetes Node Labels to Datadog Tags - nodeLabelsAsTags: {} - # beta.kubernetes.io/instance-type: aws-instance-type - # kubernetes.io/role: kube_role - # : - - # datadog.podLabelsAsTags -- Provide a mapping of Kubernetes Labels to Datadog Tags - podLabelsAsTags: {} - # app: kube_app - # release: helm_release - # : - - # datadog.podAnnotationsAsTags -- Provide a mapping of Kubernetes Annotations to Datadog Tags - podAnnotationsAsTags: {} - # iam.amazonaws.com/role: kube_iamrole - # : - - # datadog.tags -- List of static tags to attach to every metric, event and service check collected by this Agent. 
- ## Learn more about tagging: https://docs.datadoghq.com/tagging/ - tags: [] - # - ":" - # - ":" - - # kubelet configuration - kubelet: - # datadog.kubelet.host -- Override kubelet IP - host: - valueFrom: - fieldRef: - fieldPath: status.hostIP - # datadog.kubelet.tlsVerify -- Toggle kubelet TLS verification - # @default -- true - tlsVerify: # false - - ## dogstatsd configuration - ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/ - ## To emit custom metrics from your Kubernetes application, use DogStatsD. - dogstatsd: - # datadog.dogstatsd.port -- Override the Agent DogStatsD port - ## Note: Make sure your client is sending to the same UDP port. - port: 8125 - - # datadog.dogstatsd.originDetection -- Enable origin detection for container tagging - ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging - originDetection: false - - # datadog.dogstatsd.tags -- List of static tags to attach to every custom metric, event and service check collected by Dogstatsd. - ## Learn more about tagging: https://docs.datadoghq.com/tagging/ - tags: [] - # - ":" - # - ":" - - # datadog.dogstatsd.tagCardinality -- Sets the tag cardinality relative to the origin detection - ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging - tagCardinality: low - - # datadog.dogstatsd.useSocketVolume -- Enable dogstatsd over Unix Domain Socket - ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ - useSocketVolume: false - - # datadog.dogstatsd.socketPath -- Path to the DogStatsD socket - socketPath: /var/run/datadog/dsd.socket - - # datadog.dogstatsd.hostSocketPath -- Host path to the DogStatsD socket - hostSocketPath: /var/run/datadog/ - - # datadog.dogstatsd.useHostPort -- Sets the hostPort to the same value of the container port - ## Needs to be used for sending custom metrics. - ## The ports need to be available on all hosts. 
- ## - ## WARNING: Make sure that hosts using this are properly firewalled otherwise - ## metrics and traces are accepted from any host able to connect to this host. - useHostPort: false - - # datadog.dogstatsd.useHostPID -- Run the agent in the host's PID namespace - ## This is required for Dogstatsd origin detection to work. - ## See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ - useHostPID: false - - # datadog.dogstatsd.nonLocalTraffic -- Enable this to make each node accept non-local statsd traffic (from outside of the pod) - ## ref: https://github.com/DataDog/docker-dd-agent#environment-variables - nonLocalTraffic: true - - # datadog.collectEvents -- Enables this to start event collection from the kubernetes API - ## ref: https://docs.datadoghq.com/agent/kubernetes/#event-collection - collectEvents: true - - # datadog.leaderElection -- Enables leader election mechanism for event collection - leaderElection: true - - # datadog.leaderLeaseDuration -- Set the lease time for leader election in second - leaderLeaseDuration: # 60 - - ## Enable logs agent and provide custom configs - logs: - # datadog.logs.enabled -- Enables this to activate Datadog Agent log collection - ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - enabled: false - - # datadog.logs.containerCollectAll -- Enable this to allow log collection for all containers - ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - containerCollectAll: false - - # datadog.logs.containerCollectUsingFiles -- Collect logs from files in /var/log/pods instead of using container runtime API - ## It's usually the most efficient way of collecting logs. 
- ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - containerCollectUsingFiles: true - - ## Enable apm agent and provide custom configs - apm: - # datadog.apm.enabled -- Enable this to enable APM and tracing, on port 8126 - ## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host - enabled: false - - # datadog.apm.port -- Override the trace Agent port - ## Note: Make sure your client is sending to the same UDP port. - port: 8126 - - # datadog.apm.useSocketVolume -- Enable APM over Unix Domain Socket - ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - useSocketVolume: false - - # datadog.apm.socketPath -- Path to the trace-agent socket - socketPath: /var/run/datadog/apm.socket - - # datadog.apm.hostSocketPath -- Host path to the trace-agent socket - hostSocketPath: /var/run/datadog/ - - # datadog.envFrom -- Set environment variables for all Agents directly from configMaps and/or secrets - ## envFrom to pass configmaps or secrets as environment - envFrom: [] - # - configMapRef: - # name: - # - secretRef: - # name: - - # datadog.env -- Set environment variables for all Agents - ## The Datadog Agent supports many environment variables. 
- ## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables - env: [] - # - name: - # value: - - # datadog.confd -- Provide additional check configurations (static and Autodiscovery) - ## Each key becomes a file in /conf.d - ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes - ## ref: https://docs.datadoghq.com/agent/autodiscovery/ - confd: {} - # redisdb.yaml: |- - # init_config: - # instances: - # - host: "name" - # port: "6379" - # kubernetes_state.yaml: |- - # ad_identifiers: - # - kube-state-metrics - # init_config: - # instances: - # - kube_state_url: http://%%host%%:8080/metrics - - # datadog.checksd -- Provide additional custom checks as python code - ## Each key becomes a file in /checks.d - ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes - checksd: {} - # service.py: |- - - # datadog.dockerSocketPath -- Path to the docker socket - dockerSocketPath: # /var/run/docker.sock - - # datadog.criSocketPath -- Path to the container runtime socket (if different from Docker) - criSocketPath: # /var/run/containerd/containerd.sock - - ## Enable process agent and provide custom configs - processAgent: - # datadog.processAgent.enabled -- Set this to true to enable live process monitoring agent - ## Note: /etc/passwd is automatically mounted to allow username resolution. 
- ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset - enabled: true - - # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent - ## Requires processAgent.enabled to be set to true to have any effect - processCollection: false - - ## Enable systemProbe agent and provide custom configs - systemProbe: - - # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent - debugPort: 0 - - # datadog.systemProbe.enableConntrack -- Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data - ## Ref: http://conntrack-tools.netfilter.org/ - enableConntrack: true - - # datadog.systemProbe.seccomp -- Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges - ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash` - seccomp: localhost/system-probe - - # datadog.systemProbe.seccompRoot -- Specify the seccomp profile root directory - seccompRoot: /var/lib/kubelet/seccomp - - # datadog.systemProbe.bpfDebug -- Enable logging for kernel debug - bpfDebug: false - - # datadog.systemProbe.apparmor -- Specify a apparmor profile for system-probe - apparmor: unconfined - - # datadog.systemProbe.enableTCPQueueLength -- Enable the TCP queue length eBPF-based check - enableTCPQueueLength: false - - # datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check - enableOOMKill: false - - # datadog.systemProbe.collectDNSStats -- Enable DNS stat collection - collectDNSStats: false - - orchestratorExplorer: - # datadog.orchestratorExplorer.enabled -- Set this to false to disable the orchestrator explorer - ## This requires processAgent.enabled and clusterAgent.enabled to be set to true - ## ref: TODO - add doc link - enabled: true - - # datadog.orchestratorExplorer.container_scrubbing -- Enable the scrubbing of containers in the kubernetes 
resource YAML for sensitive information - ## The container scrubbing is taking significant resources during data collection. - ## If you notice that the cluster-agent uses too much CPU in larger clusters - ## turning this option off will improve the situation. - container_scrubbing: - enabled: true - - networkMonitoring: - # datadog.networkMonitoring.enabled -- Enable network performance monitoring - enabled: false - - ## Enable security agent and provide custom configs - securityAgent: - compliance: - # datadog.securityAgent.compliance.enabled -- Set this to true to enable compliance checks - enabled: false - - # datadog.securityAgent.compliance.configMap -- Contains compliance benchmarks that will be used - configMap: - - # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval - checkInterval: 20m - - runtime: - # datadog.securityAgent.runtime.enabled -- Set to true to enable the Security Runtime Module - enabled: false - - policies: - # datadog.securityAgent.runtime.policies.configMap -- Contains policies that will be used - configMap: - - syscallMonitor: - # datadog.securityAgent.runtime.syscallMonitor.enabled -- Set to true to enable the Syscall monitoring. - enabled: false - - ## Manage NetworkPolicy - networkPolicy: - # datadog.networkPolicy.create -- If true, create NetworkPolicy for all the components - create: false - - # datadog.networkPolicy.flavor -- Flavor of the network policy to use. 
- # Can be: - # * kubernetes for networking.k8s.io/v1/NetworkPolicy - # * cilium for cilium.io/v2/CiliumNetworkPolicy - flavor: kubernetes - - cilium: - # datadog.networkPolicy.cilium.dnsSelector -- Cilium selector of the DNS server entity - # @default -- kube-dns in namespace kube-system - dnsSelector: - toEndpoints: - - matchLabels: - "k8s:io.kubernetes.pod.namespace": kube-system - "k8s:k8s-app": kube-dns - -## This is the Datadog Cluster Agent implementation that handles cluster-wide -## metrics more cleanly, separates concerns for better rbac, and implements -## the external metrics API so you can autoscale HPAs based on datadog metrics -## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ -clusterAgent: - # clusterAgent.enabled -- Set this to false to disable Datadog Cluster Agent - enabled: true - - ## Define the Datadog Cluster-Agent image to work with - image: - # clusterAgent.image.name -- Cluster Agent image name to use (relative to `registry`) - name: cluster-agent - - # clusterAgent.image.tag -- Cluster Agent image tag to use - tag: 1.10.0 - - # clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent - repository: - - # clusterAgent.image.pullPolicy -- Cluster Agent image pullPolicy - pullPolicy: IfNotPresent - - # clusterAgent.image.pullSecrets -- Cluster Agent repository pullSecret (ex: specify docker registry credentials) - ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - pullSecrets: [] - # - name: "" - - # clusterAgent.securityContext -- Allows you to overwrite the default PodSecurityContext on the cluster-agent pods. 
- securityContext: {} - - # clusterAgent.command -- Command to run in the Cluster Agent container as entrypoint - command: [] - - # clusterAgent.token -- Cluster Agent token is a preshared key between node agents and cluster agent (autogenerated if empty, needs to be at least 32 characters a-zA-z) - token: "" - - # clusterAgent.tokenExistingSecret -- Existing secret name to use for Cluster Agent token - tokenExistingSecret: "" - - # clusterAgent.replicas -- Specify the of cluster agent replicas, if > 1 it allow the cluster agent to work in HA mode. - replicas: 1 - - ## Provide Cluster Agent Deployment pod(s) RBAC configuration - rbac: - # clusterAgent.rbac.create -- If true, create & use RBAC resources - create: true - - # clusterAgent.rbac.serviceAccountName -- Specify service account name to use (usually pre-existing, created if create is true) - serviceAccountName: default - - ## Provide Cluster Agent PodSecurityPolicy configuration - podSecurity: - podSecurityPolicy: - # clusterAgent.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Cluster Agent pods - create: false - - # Enable the metricsProvider to be able to scale based on metrics in Datadog - metricsProvider: - # clusterAgent.metricsProvider.enabled -- Set this to true to enable Metrics Provider - enabled: false - - # clusterAgent.metricsProvider.wpaController -- Enable informer and controller of the watermark pod autoscaler - ## NOTE: You need to install the `WatermarkPodAutoscaler` CRD before - wpaController: false - - # clusterAgent.metricsProvider.useDatadogMetrics -- Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries - ## NOTE: It will install DatadogMetrics CRD automatically (it may conflict with previous installations) - useDatadogMetrics: false - - # clusterAgent.metricsProvider.createReaderRbac -- Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) - createReaderRbac: true - - # 
clusterAgent.metricsProvider.aggregator -- Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) - aggregator: avg - - ## Configuration for the service for the cluster-agent metrics server - service: - # clusterAgent.metricsProvider.service.type -- Set type of cluster-agent metrics server service - type: ClusterIP - - # clusterAgent.metricsProvider.service.port -- Set port of cluster-agent metrics server service (Kubernetes >= 1.15) - port: 8443 - - # clusterAgent.env -- Set environment variables specific to Cluster Agent - ## The Cluster-Agent supports many additional environment variables - ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options - env: [] - - admissionController: - # clusterAgent.admissionController.enabled -- Enable the admissionController to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods - enabled: false - - # clusterAgent.admissionController.mutateUnlabelled -- Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"' - mutateUnlabelled: false - - # clusterAgent.confd -- Provide additional cluster check configurations - ## Each key will become a file in /conf.d - ## ref: https://docs.datadoghq.com/agent/autodiscovery/ - confd: {} - # mysql.yaml: |- - # cluster_check: true - # instances: - # - server: '' - # port: 3306 - # user: datadog - # pass: '' - - # clusterAgent.resources -- Datadog cluster-agent resource requests and limits. 
- resources: {} - # requests: - # cpu: 200m - # memory: 256Mi - # limits: - # cpu: 200m - # memory: 256Mi - - # clusterAgent.priorityClassName -- Name of the priorityClass to apply to the Cluster Agent - priorityClassName: # system-cluster-critical - - # clusterAgent.nodeSelector -- Allow the Cluster Agent Deployment to be scheduled on selected nodes - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - nodeSelector: {} - - # clusterAgent.affinity -- Allow the Cluster Agent Deployment to schedule using affinity rules - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - affinity: {} - - # clusterAgent.healthPort -- Port number to use in the Cluster Agent for the healthz endpoint - healthPort: 5555 - - # clusterAgent.livenessProbe -- Override default Cluster Agent liveness probe settings - # @default -- Every 15s / 6 KO / 1 OK - livenessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # clusterAgent.readinessProbe -- Override default Cluster Agent readiness probe settings - # @default -- Every 15s / 6 KO / 1 OK - readinessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # clusterAgent.strategy -- Allow the Cluster Agent deployment to perform a rolling update on helm update - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - - # clusterAgent.podAnnotations -- Annotations to add to the cluster-agents's pod(s) - podAnnotations: {} - # key: "value" - - # clusterAgent.useHostNetwork -- Bind ports on the hostNetwork - ## Useful for CNI networking where hostPort might - ## not be supported. The ports need to be available on all hosts. 
It can be - ## used for custom metrics instead of a service endpoint. - ## - ## WARNING: Make sure that hosts using this are properly firewalled otherwise - ## metrics and traces are accepted from any host able to connect to this host. - # - useHostNetwork: # true - - # clusterAgent.dnsConfig -- Specify dns configuration options for datadog cluster agent containers e.g ndots - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config - dnsConfig: {} - # options: - # - name: ndots - # value: "1" - - # clusterAgent.volumes -- Specify additional volumes to mount in the cluster-agent container - volumes: [] - # - hostPath: - # path: - # name: - - # clusterAgent.volumeMounts -- Specify additional volumes to mount in the cluster-agent container - volumeMounts: [] - # - name: - # mountPath: - # readOnly: true - - # clusterAgent.datadog_cluster_yaml -- Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml) - datadog_cluster_yaml: {} - - # clusterAgent.createPodDisruptionBudget -- Create pod disruption budget for Cluster Agent deployments - createPodDisruptionBudget: false - - networkPolicy: - # clusterAgent.networkPolicy.create -- If true, create a NetworkPolicy for the cluster agent. - # DEPRECATED. Use datadog.networkPolicy.create instead - create: false - - # clusterAgent.additionalLabels -- Adds labels to the Cluster Agent deployment and pods - additionalLabels: {} - # key: "value" - -agents: - # agents.enabled -- You should keep Datadog DaemonSet enabled! - ## The exceptional case could be a situation when you need to run - ## single Datadog pod per every namespace, but you do not need to - ## re-create a DaemonSet for every non-default namespace install. - ## Note: StatsD and DogStatsD work over UDP, so you may not - ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! 
- # - enabled: true - - ## Define the Datadog image to work with - image: - # agents.image.name -- Datadog Agent image name to use (relative to `registry`) - ## use "dogstatsd" for Standalone Datadog Agent DogStatsD 7 - name: agent - - # agents.image.tag -- Define the Agent version to use - ## Use 7-jmx to enable jmx fetch collection - tag: 7.25.0 - - # agents.image.repository -- Override default registry + image.name for Agent - repository: - - # agents.image.doNotCheckTag -- Skip the version<>chart compatibility check - ## By default, the version passed in agents.image.tag is checked - ## for compatibility with the version of the chart. - ## This boolean permits to completely skip this check. - ## This is useful, for example, for custom tags that are not - ## respecting semantic versioning - doNotCheckTag: # false - - # agents.image.pullPolicy -- Datadog Agent image pull policy - pullPolicy: IfNotPresent - - # agents.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) - ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - pullSecrets: [] - # - name: "" - - ## Provide Daemonset RBAC configuration - rbac: - # agents.rbac.create -- If true, create & use RBAC resources - create: true - - # agents.rbac.serviceAccountName -- Specify service account name to use (usually pre-existing, created if create is true) - serviceAccountName: default - - ## Provide Daemonset PodSecurityPolicy configuration - podSecurity: - podSecurityPolicy: - # agents.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Agent pods - create: false - - securityContextConstraints: - # agents.podSecurity.securityContextConstraints.create -- If true, create a SecurityContextConstraints resource for Agent pods - create: false - - # agents.podSecurity.seLinuxContext -- Provide seLinuxContext configuration for PSP/SCC - # @default -- Must run as spc_t - seLinuxContext: - 
rule: MustRunAs - seLinuxOptions: - user: system_u - role: system_r - type: spc_t - level: s0 - - # agents.podSecurity.privileged -- If true, Allow to run privileged containers - privileged: false - - # agents.podSecurity.capabilites -- Allowed capabilites - capabilites: - - SYS_ADMIN - - SYS_RESOURCE - - SYS_PTRACE - - NET_ADMIN - - NET_BROADCAST - - IPC_LOCK - - AUDIT_CONTROL - - AUDIT_READ - - # agents.podSecurity.volumes -- Allowed volumes types - volumes: - - configMap - - downwardAPI - - emptyDir - - hostPath - - secret - - # agents.podSecurity.seccompProfiles -- Allowed seccomp profiles - seccompProfiles: - - "runtime/default" - - "localhost/system-probe" - - # agents.podSecurity.apparmorProfiles -- Allowed apparmor profiles - apparmorProfiles: - - "runtime/default" - - "unconfined" - - apparmor: - # agents.podSecurity.apparmor.enabled -- If true, enable apparmor enforcement - ## see: https://kubernetes.io/docs/tutorials/clusters/apparmor/ - enabled: true - - containers: - agent: - # agents.containers.agent.env -- Additional environment variables for the agent container - env: [] - - # agents.containers.agent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off - ## If not set, fall back to the value of datadog.logLevel. - logLevel: # INFO - - # agents.containers.agent.resources -- Resource requests and limits for the agent container. 
- resources: {} - # requests: - # cpu: 200m - # memory: 256Mi - # limits: - # cpu: 200m - # memory: 256Mi - - # agents.containers.agent.healthPort -- Port number to use in the node agent for the healthz endpoint - healthPort: 5555 - - # agents.containers.agent.livenessProbe -- Override default agent liveness probe settings - # @default -- Every 15s / 6 KO / 1 OK - livenessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # agents.containers.agent.readinessProbe -- Override default agent readiness probe settings - # @default -- Every 15s / 6 KO / 1 OK - readinessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # agents.containers.agent.securityContext -- Allows you to overwrite the default container SecurityContext for the agent container. - securityContext: {} - - # agents.containers.agent.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - - processAgent: - # agents.containers.processAgent.env -- Additional environment variables for the process-agent container - env: [] - - # agents.containers.processAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off - ## If not set, fall back to the value of datadog.logLevel. - logLevel: # INFO - - # agents.containers.processAgent.resources -- Resource requests and limits for the process-agent container - resources: {} - # requests: - # cpu: 100m - # memory: 200Mi - # limits: - # cpu: 100m - # memory: 200Mi - - # agents.containers.processAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the process-agent container. 
- securityContext: {} - - # agents.containers.processAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - - traceAgent: - # agents.containers.traceAgent.env -- Additional environment variables for the trace-agent container - env: - - # agents.containers.traceAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off - logLevel: # INFO - - # agents.containers.traceAgent.resources -- Resource requests and limits for the trace-agent container - resources: {} - # requests: - # cpu: 100m - # memory: 200Mi - # limits: - # cpu: 100m - # memory: 200Mi - - # agents.containers.traceAgent.livenessProbe -- Override default agent liveness probe settings - # @default -- Every 15s - livenessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - - # agents.containers.traceAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the trace-agent container. - securityContext: {} - - # agents.containers.traceAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - - systemProbe: - # agents.containers.systemProbe.env -- Additional environment variables for the system-probe container - env: [] - - # agents.containers.systemProbe.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. - ## If not set, fall back to the value of datadog.logLevel. - logLevel: # INFO - - # agents.containers.systemProbe.resources -- Resource requests and limits for the system-probe container - resources: {} - # requests: - # cpu: 100m - # memory: 200Mi - # limits: - # cpu: 100m - # memory: 200Mi - - # agents.containers.systemProbe.securityContext -- Allows you to overwrite the default container SecurityContext for the system-probe container. 
- securityContext: - privileged: false - capabilities: - add: ["SYS_ADMIN", "SYS_RESOURCE", "SYS_PTRACE", "NET_ADMIN", "NET_BROADCAST", "IPC_LOCK"] - - # agents.containers.systemProbe.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - - securityAgent: - # agents.containers.securityAgent.env -- Additional environment variables for the security-agent container - env: - - # agents.containers.securityAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off - ## If not set, fall back to the value of datadog.logLevel. - logLevel: # INFO - - # agents.containers.securityAgent.resources -- Resource requests and limits for the security-agent container - resources: {} - # requests: - # cpu: 100m - # memory: 200Mi - # limits: - # cpu: 100m - # memory: 200Mi - - # agents.containers.securityAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - - initContainers: - # agents.containers.initContainers.resources -- Resource requests and limits for the init containers - resources: {} - # requests: - # cpu: 100m - # memory: 200Mi - # limits: - # cpu: 100m - # memory: 200Mi - - # agents.volumes -- Specify additional volumes to mount in the dd-agent container - volumes: [] - # - hostPath: - # path: - # name: - - # agents.volumeMounts -- Specify additional volumes to mount in all containers of the agent pod - volumeMounts: [] - # - name: - # mountPath: - # readOnly: true - - # agents.useHostNetwork -- Bind ports on the hostNetwork - ## Useful for CNI networking where hostPort might - ## not be supported. The ports need to be available on all hosts. It Can be - ## used for custom metrics instead of a service endpoint. - ## - ## WARNING: Make sure that hosts using this are properly firewalled otherwise - ## metrics and traces are accepted from any host able to connect to this host. 
- useHostNetwork: false - - # agents.dnsConfig -- specify dns configuration options for datadog cluster agent containers e.g ndots - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config - dnsConfig: {} - # options: - # - name: ndots - # value: "1" - - # agents.podAnnotations -- Annotations to add to the DaemonSet's Pods - podAnnotations: {} - # : '[{"key": "", "value": ""}]' - - # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) - tolerations: [] - - # agents.nodeSelector -- Allow the DaemonSet to schedule on selected nodes - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - nodeSelector: {} - - # agents.affinity -- Allow the DaemonSet to schedule using affinity rules - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - affinity: {} - - # agents.updateStrategy -- Allow the DaemonSet to perform a rolling update on helm update - ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: "10%" - - # agents.priorityClassName -- Sets PriorityClassName if defineds - priorityClassName: - - # agents.podLabels -- Sets podLabels if defined - # Note: These labels are also used as label selectors so they are immutable. - podLabels: {} - - # agents.additionalLabels -- Adds labels to the Agent daemonset and pods - additionalLabels: {} - # key: "value" - - # agents.useConfigMap -- Configures a configmap to provide the agent configuration. Use this in combination with the `agents.customAgentConfig` parameter. 
- useConfigMap: # false - - # agents.customAgentConfig -- Specify custom contents for the datadog agent config (datadog.yaml) - ## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 - ## ref: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml - ## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account. - customAgentConfig: {} - # # Autodiscovery for Kubernetes - # listeners: - # - name: kubelet - # config_providers: - # - name: kubelet - # polling: true - # # needed to support legacy docker label config templates - # - name: docker - # polling: true - # - # # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration - # apm_config: - # enabled: false - # apm_non_local_traffic: true - # - # # Enable java cgroup handling. Only one of those options should be enabled, - # # depending on the agent version you are using along that chart. - # - # # agent version < 6.15 - # # jmx_use_cgroup_memory_limit: true - # - # # agent version >= 6.15 - # # jmx_use_container_support: true - - networkPolicy: - # agents.networkPolicy.create -- If true, create a NetworkPolicy for the agents. - # DEPRECATED. Use datadog.networkPolicy.create instead - create: false - -clusterChecksRunner: - # clusterChecksRunner.enabled -- If true, deploys agent dedicated for running the Cluster Checks instead of running in the Daemonset's agents. - ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ - enabled: false - - ## Define the Datadog image to work with. 
- image: - # clusterChecksRunner.image.name -- Datadog Agent image name to use (relative to `registry`) - name: agent - - # clusterChecksRunner.image.tag -- Define the Agent version to use - ## Use 7-jmx to enable jmx fetch collection - tag: 7.25.0 - - # clusterChecksRunner.image.repository -- Override default registry + image.name for Cluster Check Runners - repository: - - # clusterChecksRunner.image.pullPolicy -- Datadog Agent image pull policy - pullPolicy: IfNotPresent - - # clusterChecksRunner.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) - ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - pullSecrets: [] - # - name: "" - - # clusterChecksRunner.createPodDisruptionBudget -- Create the pod disruption budget to apply to the cluster checks agents - createPodDisruptionBudget: false - - # Provide Cluster Checks Deployment pods RBAC configuration - rbac: - # clusterChecksRunner.rbac.create -- If true, create & use RBAC resources - create: true - - # clusterChecksRunner.rbac.dedicated -- If true, use a dedicated RBAC resource for the cluster checks agent(s) - dedicated: false - - # clusterChecksRunner.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true - serviceAccountAnnotations: {} - - # clusterChecksRunner.rbac.serviceAccountName -- Specify service account name to use (usually pre-existing, created if create is true) - serviceAccountName: default - - # clusterChecksRunner.replicas -- Number of Cluster Checks Runner instances - ## If you want to deploy the clusterChecks agent in HA, keep at least clusterChecksRunner.replicas set to 2. - ## And increase the clusterChecksRunner.replicas according to the number of Cluster Checks. - replicas: 2 - - # clusterChecksRunner.resources -- Datadog clusterchecks-agent resource requests and limits. 
- resources: {} - # requests: - # cpu: 200m - # memory: 500Mi - # limits: - # cpu: 200m - # memory: 500Mi - - # clusterChecksRunner.affinity -- Allow the ClusterChecks Deployment to schedule using affinity rules. - ## By default, ClusterChecks Deployment Pods are forced to run on different Nodes. - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - affinity: {} - - # clusterChecksRunner.strategy -- Allow the ClusterChecks deployment to perform a rolling update on helm update - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - - # clusterChecksRunner.dnsConfig -- specify dns configuration options for datadog cluster agent containers e.g ndots - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config - dnsConfig: {} - # options: - # - name: ndots - # value: "1" - - # clusterChecksRunner.nodeSelector -- Allow the ClusterChecks Deployment to schedule on selected nodes - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - # clusterChecksRunner.tolerations -- Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - # - tolerations: [] - - # clusterChecksRunner.healthPort -- Port number to use in the Cluster Checks Runner for the healthz endpoint - healthPort: 5555 - - # clusterChecksRunner.livenessProbe -- Override default agent liveness probe settings - # @default -- Every 15s / 6 KO / 1 OK - ## In case of issues with the probe, you can disable it with the - ## following values, to allow easier investigating: - # - # livenessProbe: - # exec: - # command: ["/bin/true"] - # - livenessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # clusterChecksRunner.readinessProbe -- Override default agent 
readiness probe settings - # @default -- Every 15s / 6 KO / 1 OK - ## In case of issues with the probe, you can disable it with the - ## following values, to allow easier investigating: - # - # readinessProbe: - # exec: - # command: ["/bin/true"] - # - readinessProbe: - initialDelaySeconds: 15 - periodSeconds: 15 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - - # clusterChecksRunner.podAnnotations -- Annotations to add to the cluster-checks-runner's pod(s) - podAnnotations: {} - # key: "value" - - # clusterChecksRunner.env -- Environment variables specific to Cluster Checks Runner - ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#environment-variables - env: [] - # - name: - # value: - - # clusterChecksRunner.volumes -- Specify additional volumes to mount in the cluster checks container - volumes: [] - # - hostPath: - # path: - # name: - - # clusterChecksRunner.volumeMounts -- Specify additional volumes to mount in the cluster checks container - volumeMounts: [] - # - name: - # mountPath: - # readOnly: true - - networkPolicy: - # clusterChecksRunner.networkPolicy.create -- If true, create a NetworkPolicy for the cluster checks runners. - # DEPRECATED. Use datadog.networkPolicy.create instead - create: false - - # clusterChecksRunner.additionalLabels -- Adds labels to the cluster checks runner deployment and pods - additionalLabels: {} - # key: "value" - - # clusterChecksRunner.securityContext -- Allows you to overwrite the default PodSecurityContext on the clusterchecks pods. 
- securityContext: {} - - # clusterChecksRunner.ports -- Allows to specify extra ports (hostPorts for instance) for this container - ports: [] - -datadog-crds: - crds: - # datadog-crds.crds.datadogMetrics -- Set to true to deploy the DatadogMetrics CRD - datadogMetrics: true - -kube-state-metrics: - rbac: - # kube-state-metrics.rbac.create -- If true, create & use RBAC resources - create: true - - serviceAccount: - # kube-state-metrics.serviceAccount.create -- If true, create ServiceAccount, require rbac kube-state-metrics.rbac.create true - create: true - - # kube-state-metrics.serviceAccount.name -- The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - name: - - # kube-state-metrics.resources -- Resource requests and limits for the kube-state-metrics container. - resources: {} - # requests: - # cpu: 200m - # memory: 256Mi - # limits: - # cpu: 200m - # memory: 256Mi - - # kube-state-metrics.nodeSelector -- Node selector for KSM. KSM only supports Linux. - nodeSelector: - kubernetes.io/os: linux diff --git a/examples/optional-components-k8s/main.tf b/examples/optional-components-k8s/main.tf deleted file mode 100644 index edadcfe..0000000 --- a/examples/optional-components-k8s/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -module "infrastructure-terraform-eks" { - source = "../.." 
- - aws_region = "ap-southeast-1" - tfenv = "example-testing-basic" - root_domain_name = "basic.example.com" -} - -module "datadog-dashboard" { - source = "./datadog-dashboard" - depends_on = [module.infrastructure-terraform-eks] - - app_namespace = "technology-system" - tfenv = "example-testing-basic" - datadog_apikey = var.datadog_serviceacount_apikey -} \ No newline at end of file diff --git a/examples/optional-components-k8s/providers.tf b/examples/optional-components-k8s/providers.tf deleted file mode 100644 index 2a5ccf3..0000000 --- a/examples/optional-components-k8s/providers.tf +++ /dev/null @@ -1,3 +0,0 @@ -provider "aws" { - region = var.aws_region -} \ No newline at end of file diff --git a/examples/optional-components-k8s/vars.tf b/examples/optional-components-k8s/vars.tf deleted file mode 100644 index 5c01731..0000000 --- a/examples/optional-components-k8s/vars.tf +++ /dev/null @@ -1,13 +0,0 @@ -variable "gitlab_token" { - description = "Gitlab Token" -} - -variable "aws_region" { - description = "AWS Region for Provisioning" - default = "ap-southeast-1" -} - -variable "datadog_serviceacount_apikey" { - description = "Datadog API Key for integration with Cluster" - default = "" -} \ No newline at end of file diff --git a/examples/standard/data.tf b/examples/standard/data.tf new file mode 100644 index 0000000..d78fce4 --- /dev/null +++ b/examples/standard/data.tf @@ -0,0 +1 @@ +data "aws_caller_identity" "current" {} \ No newline at end of file diff --git a/examples/standard/providers.tf b/examples/standard/providers.tf index 2a5ccf3..258df56 100644 --- a/examples/standard/providers.tf +++ b/examples/standard/providers.tf @@ -1,3 +1,19 @@ -provider "aws" { - region = var.aws_region +variable "gitlab_token" { + description = "Gitlab Token" + default = "" +} + +variable "aws_region" { + description = "AWS Region for Provisioning" + default = "ap-southeast-1" +} + +variable "aws_region_secondary" { + description = "Secondary AWS Region for Provisioning" + 
default = "eu-west-1" +} + +variable "tech_email" { + description = "Tech Email for Contact" + default = "tech@example.com" } \ No newline at end of file diff --git a/kubernetes-helm.tf b/kubernetes-helm.tf new file mode 100644 index 0000000..e785d46 --- /dev/null +++ b/kubernetes-helm.tf @@ -0,0 +1,130 @@ +module "nginx-controller-ingress" { + source = "./provisioning/kubernetes/nginx-controller" + depends_on = [module.eks, resource.aws_route53_zone.hosted_zone] + count = var.helm_installations.ingress ? 1 : 0 + + cluster_root_domain = var.cluster_root_domain + app_namespace = var.app_namespace + app_name = var.app_name + tfenv = var.tfenv + infrastructure_eks_terraform_version = local.module_version + billingcustomer = var.billingcustomer + + custom_manifest = try(var.helm_configurations.ingress.nginx_values, null) + ingress_records = var.cluster_root_domain.ingress_records != null ? var.cluster_root_domain.ingress_records : [] +} + +module "certmanager" { + source = "./provisioning/kubernetes/certmanager" + depends_on = [module.eks] + count = var.helm_installations.ingress ? 1 : 0 + + custom_manifest = try(var.helm_configurations.ingress.certmanager_values, null) + letsencrypt_email = var.tech_email +} + +module "kubernetes-dashboard" { + source = "./provisioning/kubernetes/kubernetes-dashboard" + depends_on = [module.eks] + count = var.helm_installations.dashboard ? 1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv + + custom_manifest = var.helm_configurations.dashboard +} + +module "consul" { + source = "./provisioning/kubernetes/hashicorp-consul" + depends_on = [module.eks] + count = var.helm_installations.vault_consul ? 1 : 0 + + vault_nodeselector = try(var.helm_configurations.vault_consul.vault_nodeselector, "") != null ? var.helm_configurations.vault_consul.vault_nodeselector : "" + vault_tolerations = try(var.helm_configurations.vault_consul.vault_tolerations, "") != null ? 
var.helm_configurations.vault_consul.vault_tolerations : "" + + app_namespace = var.app_namespace + tfenv = var.tfenv + root_domain_name = var.cluster_root_domain.name + app_name = var.app_name + tags = local.base_tags +} +module "vault" { + source = "./provisioning/kubernetes/hashicorp-vault" + depends_on = [module.eks] + count = var.helm_installations.vault_consul ? 1 : 0 + + vault_nodeselector = try(var.helm_configurations.vault_consul.vault_nodeselector, "") != null ? var.helm_configurations.vault_consul.vault_nodeselector : "" + vault_tolerations = try(var.helm_configurations.vault_consul.vault_tolerations, "") != null ? var.helm_configurations.vault_consul.vault_tolerations : "" + app_namespace = var.app_namespace + tfenv = var.tfenv + root_domain_name = var.cluster_root_domain.name + app_name = var.app_name + billingcustomer = var.billingcustomer + aws_region = var.aws_region + enable_aws_vault_unseal = try(var.helm_configurations.vault_consul.enable_aws_vault_unseal, false) != null ? var.helm_configurations.vault_consul.enable_aws_vault_unseal : false + tags = local.base_tags + + custom_manifest = var.helm_configurations.vault_consul +} + +module "elastic-stack" { + source = "./provisioning/kubernetes/elastic-stack" + depends_on = [module.eks] + count = var.helm_installations.elasticstack ? 1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv + root_domain_name = var.cluster_root_domain.name + google_clientID = var.google_clientID + google_clientSecret = var.google_clientSecret + google_authDomain = var.google_authDomain + billingcustomer = var.billingcustomer + app_name = var.app_name + aws_region = var.aws_region + tags = local.base_tags +} + +module "grafana" { + source = "./provisioning/kubernetes/grafana" + depends_on = [module.eks] + count = var.helm_installations.grafana ? 
1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv + root_domain_name = var.cluster_root_domain.name + google_clientID = var.google_clientID + google_clientSecret = var.google_clientSecret + google_authDomain = var.google_authDomain + + custom_manifest = var.helm_configurations.grafana +} + +module "argocd" { + source = "./provisioning/kubernetes/argocd" + count = var.helm_installations.argocd ? 1 : 0 + depends_on = [module.eks] + + root_domain_name = var.cluster_root_domain.name + operator_domain_name = var.operator_domain_name + hosted_zone_id = aws_route53_zone.hosted_zone[0].zone_id + custom_manifest = var.helm_configurations.argocd + repository_secrets = var.helm_configurations.argocd.repository_secrets + credential_templates = var.helm_configurations.argocd.credential_templates + registry_secrets = var.helm_configurations.argocd.registry_secrets + generate_plugin_repository_secret = var.helm_configurations.argocd.generate_plugin_repository_secret +} + +# module "gitlab_runner" { +# source = "./provisioning/kubernetes/gitlab-runner" +# depends_on = [module.namespaces, module.eks-vpc] +# count = var.helm_installations.gitlab_runner ? 
1 : 0 + +# app_name = var.app_name +# app_namespace = var.app_namespace +# tfenv = var.tfenv +# aws_region = var.aws_region +# gitlab_serviceaccount_id = var.gitlab_serviceaccount_id +# gitlab_serviceaccount_secret = var.gitlab_serviceaccount_secret +# gitlab_runner_concurrent_agents = var.gitlab_runner_concurrent_agents +# gitlab_runner_registration_token = var.gitlab_runner_registration_token +# } diff --git a/kubernetes-namespaces.tf b/kubernetes-namespaces.tf new file mode 100644 index 0000000..85fae63 --- /dev/null +++ b/kubernetes-namespaces.tf @@ -0,0 +1,14 @@ +resource "kubernetes_namespace" "cluster" { + depends_on = [module.eks] + for_each = toset(local.namespaces) + + metadata { + labels = { + name = each.key + "Terraform" = true + "eks/name" = local.name_prefix + "eks/environment" = var.tfenv + } + name = each.key + } +} \ No newline at end of file diff --git a/kubernetes-secret.tf b/kubernetes-secret.tf new file mode 100644 index 0000000..5e4e6fb --- /dev/null +++ b/kubernetes-secret.tf @@ -0,0 +1,41 @@ +data "aws_ssm_parameter" "regcred_username" { + for_each = { + for regcred in var.registry_credentials : "${regcred.name}-${regcred.namespace}" => regcred + if regcred.secrets_store == "ssm" + } + + name = each.value.docker_username +} + +data "aws_ssm_parameter" "regcred_password" { + for_each = { + for regcred in var.registry_credentials : "${regcred.name}-${regcred.namespace}" => regcred + if regcred.secrets_store == "ssm" + } + + name = each.value.docker_password +} + +resource "kubernetes_secret" "regcred" { + for_each = { for regcred in var.registry_credentials : "${regcred.name}-${regcred.namespace}" => regcred } + + metadata { + name = each.value.name + namespace = each.value.namespace + } + + data = { + ".dockerconfigjson" = sensitive(jsonencode({ + auths = { + "${each.value.docker_server}" = { + "username" = each.value.secrets_store != "ssm" ? 
each.value.docker_username : data.aws_ssm_parameter.regcred_username["${each.value.name}-${each.value.namespace}"].value + "password" = each.value.secrets_store != "ssm" ? each.value.docker_password : data.aws_ssm_parameter.regcred_password["${each.value.name}-${each.value.namespace}"].value + "email" = each.value.docker_email + "auth" = base64encode("${each.value.secrets_store != "ssm" ? each.value.username : data.aws_ssm_parameter.regcred_username["${each.value.name}-${each.value.namespace}"].value}:${each.value.secrets_store != "ssm" ? each.value.docker_password : data.aws_ssm_parameter.regcred_password["${each.value.name}-${each.value.namespace}"].value}") + } + } + })) + } + + type = "kubernetes.io/dockerconfigjson" +} diff --git a/kubernetes.tf b/kubernetes.tf deleted file mode 100644 index 8153075..0000000 --- a/kubernetes.tf +++ /dev/null @@ -1,195 +0,0 @@ -module "namespaces" { - source = "./provisioning/kubernetes/namespaces" - depends_on = [module.eks, aws_eks_node_group.custom_node_group] - - helm_installations = var.helm_installations -} - -module "nginx-controller-ingress" { - source = "./provisioning/kubernetes/nginx-controller" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces] - count = var.helm_installations.ingress ? 1 : 0 - - root_domain_name = var.root_domain_name - app_namespace = var.app_namespace - app_name = var.app_name - tfenv = var.tfenv - infrastructure_eks_terraform_version = local.module_version - billingcustomer = var.billingcustomer -} - -module "certmanager" { - source = "./provisioning/kubernetes/certmanager" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress] - - count = var.helm_installations.ingress ? 
1 : 0 - letsencrypt_email = var.letsencrypt_email -} - -module "aws-support" { - source = "./provisioning/kubernetes/aws-support" - depends_on = [module.eks, module.eks-vpc, module.subnet_addrs] - - vpc_id = module.eks-vpc.vpc_id - cidr_blocks = module.eks-vpc.private_subnets_cidr_blocks - oidc_url = module.eks.cluster_oidc_issuer_url - account_id = data.aws_caller_identity.current.account_id - aws_region = var.aws_region - app_name = var.app_name - app_namespace = var.app_namespace - tfenv = var.tfenv - base_cidr_block = module.subnet_addrs.base_cidr_block - billingcustomer = var.billingcustomer - node_count = length(var.managed_node_groups) > 0 ? var.managed_node_groups[0].min_capacity : var.instance_min_size - tags = local.tags -} - -module "aws-cluster-autoscaler" { - source = "./provisioning/kubernetes/cluster-autoscaler" - depends_on = [module.eks, aws_eks_node_group.custom_node_group] - - app_name = var.app_name - app_namespace = var.app_namespace - tfenv = var.tfenv - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - aws_region = var.aws_region - scale_down_util_threshold = var.aws_autoscaler_scale_down_util_threshold - skip_nodes_with_local_storage = var.aws_autoscaler_skip_nodes_with_local_storage - skip_nodes_with_system_pods = var.aws_autoscaler_skip_nodes_with_system_pods - cordon_node_before_term = var.aws_autoscaler_cordon_node_before_term - tags = local.tags -} - -module "kubernetes-dashboard" { - source = "./provisioning/kubernetes/kubernetes-dashboard" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces] - - app_namespace = var.app_namespace - tfenv = var.tfenv -} - -module "vault" { - source = "./provisioning/kubernetes/hashicorp-vault" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.vault_consul ? 
1 : 0 - - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations - app_namespace = var.app_namespace - tfenv = var.tfenv - root_domain_name = var.root_domain_name - app_name = var.app_name - billingcustomer = var.billingcustomer - aws_region = var.aws_region - enable_aws_vault_unseal = var.enable_aws_vault_unseal - tags = local.tags -} - -module "vault-secrets-webhook" { - source = "./provisioning/kubernetes/bonzai-vault-secrets-webhook" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.vault_consul ? 1 : 0 - - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations - app_namespace = var.app_namespace - tfenv = var.tfenv -} - -module "vault-operator" { - source = "./provisioning/kubernetes/bonzai-vault-operator" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.vault_consul ? 1 : 0 - - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations - app_namespace = var.app_namespace - tfenv = var.tfenv -} - -module "consul" { - source = "./provisioning/kubernetes/hashicorp-consul" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.vault_consul ? 
1 : 0 - - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations - app_namespace = var.app_namespace - tfenv = var.tfenv - root_domain_name = var.root_domain_name - app_name = var.app_name -} - -module "elastic-stack" { - source = "./provisioning/kubernetes/elastic-stack" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.elasticstack ? 1 : 0 - - app_namespace = var.app_namespace - tfenv = var.tfenv - root_domain_name = var.root_domain_name - google_clientID = var.google_clientID - google_clientSecret = var.google_clientSecret - google_authDomain = var.google_authDomain - billingcustomer = var.billingcustomer - app_name = var.app_name - aws_region = var.aws_region - tags = local.tags -} - -module "stakater-reloader" { - source = "./provisioning/kubernetes/stakater-reloader" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.stakater_reloader ? 1 : 0 - - app_namespace = var.app_namespace - tfenv = var.tfenv -} - -module "metrics-server" { - source = "./provisioning/kubernetes/metrics-server" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.metrics_server ? 1 : 0 - - app_namespace = var.app_namespace - tfenv = var.tfenv -} - -module "grafana" { - source = "./provisioning/kubernetes/grafana" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] - count = var.helm_installations.grafana ? 
1 : 0 - - app_namespace = var.app_namespace - tfenv = var.tfenv - root_domain_name = var.root_domain_name - google_clientID = var.google_clientID - google_clientSecret = var.google_clientSecret - google_authDomain = var.google_authDomain -} - -module "gitlab-k8s-agent" { - source = "./provisioning/kubernetes/gitlab-kubernetes-agent" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces] - count = var.helm_installations.gitlab_k8s_agent ? 1 : 0 - - app_namespace = var.app_namespace - tfenv = var.tfenv - gitlab_agent_url = var.gitlab_kubernetes_agent_config.gitlab_agent_url - gitlab_agent_secret = var.gitlab_kubernetes_agent_config.gitlab_agent_secret -} - -# module "gitlab_runner" { -# source = "./provisioning/kubernetes/gitlab-runner" -# depends_on = [module.namespaces, module.eks, module.eks-vpc] -# count = var.helm_installations.gitlab_runner ? 1 : 0 - -# app_name = var.app_name -# app_namespace = var.app_namespace -# tfenv = var.tfenv -# aws_region = var.aws_region -# gitlab_serviceaccount_id = var.gitlab_serviceaccount_id -# gitlab_serviceaccount_secret = var.gitlab_serviceaccount_secret -# gitlab_runner_concurrent_agents = var.gitlab_runner_concurrent_agents -# gitlab_runner_registration_token = var.gitlab_runner_registration_token -# } diff --git a/locals.tf b/locals.tf index 43b0ca1..08414f9 100644 --- a/locals.tf +++ b/locals.tf @@ -3,24 +3,24 @@ locals { } locals { + name_prefix = var.cluster_name != "" ? 
var.cluster_name : "${var.app_name}-${var.app_namespace}-${var.tfenv}" base_tags = { - Environment = var.tfenv - Terraform = "true" - Namespace = var.app_namespace - Billingcustomer = var.billingcustomer - Product = var.app_name - infrastructure-eks-terraform = local.module_version + Environment = var.tfenv + Terraform = "true" + Version = local.module_version + Namespace = var.app_namespace + Billingcustomer = var.billingcustomer + Product = var.app_name + terraform-aws-infrastructure-eks = local.module_version } - non_vpc_tags = { - Name = "${var.app_name}-${var.app_namespace}-${var.tfenv}" - } - kubernetes_tags = { + kubernetes_tags = merge({ + Name = "${var.app_name}-${var.app_namespace}-${var.tfenv}" "k8s.io/cluster-autoscaler/enabled" = true "k8s.io/cluster-autoscaler/${var.app_name}-${var.app_namespace}-${var.tfenv}" = true - } - - tags = merge(local.base_tags, local.non_vpc_tags, var.extra_tags) - vpc_tags = merge(local.base_tags, var.extra_tags) + }, local.base_tags) + additional_kubernetes_tags = merge({ + Name = "${var.app_name}-${var.app_namespace}-${var.tfenv}" + }, local.base_tags) default_node_group = { core = { @@ -35,15 +35,16 @@ locals { k8s_labels = { Environment = var.tfenv } - tags = merge(local.kubernetes_tags, local.tags) - additional_tags = local.tags + tags = local.kubernetes_tags + additional_tags = local.additional_kubernetes_tags } } - default_aws_auth_roles = [ + aws_auth_roles = [ + for x in module.eks_managed_node_group : { - "groups" : ["system:bootstrappers", "system:nodes"], - "rolearn" : module.eks.worker_iam_role_arn, + "groups" : ["system:bootstrappers", "system:nodes"] + "rolearn" : "${x.iam_role_arn}" "username" : "system:node:{{EC2PrivateDNSName}}" } ] @@ -51,25 +52,31 @@ locals { base_cidr = var.vpc_subnet_configuration.autogenerate ? 
format(var.vpc_subnet_configuration.base_cidr, random_integer.cidr_vpc[0].result) : var.vpc_subnet_configuration.base_cidr nat_gateway_configuration = var.nat_gateway_custom_configuration.enabled ? { - "enable_nat_gateway" = var.nat_gateway_custom_configuration.enable_nat_gateway - "enable_dns_hostnames" = var.nat_gateway_custom_configuration.enable_dns_hostnames - "single_nat_gateway" = var.nat_gateway_custom_configuration.single_nat_gateway - "one_nat_gateway_per_az" = var.nat_gateway_custom_configuration.one_nat_gateway_per_az - # reuse_nat_ips = true - # external_nat_ip_ids = [aws_eip.nat_gw_elastic_ip.id] + "enable_nat_gateway" = var.nat_gateway_custom_configuration.enable_nat_gateway + "enable_dns_hostnames" = var.nat_gateway_custom_configuration.enable_dns_hostnames + "single_nat_gateway" = var.nat_gateway_custom_configuration.single_nat_gateway + "one_nat_gateway_per_az" = var.nat_gateway_custom_configuration.one_nat_gateway_per_az + "reuse_nat_ips" = var.elastic_ip_custom_configuration.enabled ? var.elastic_ip_custom_configuration.reuse_nat_ips : false + "external_nat_ip_ids" = var.elastic_ip_custom_configuration.enabled ? var.elastic_ip_custom_configuration.external_nat_ip_ids : [] "enable_vpn_gateway" = var.nat_gateway_custom_configuration.enable_vpn_gateway "propagate_public_route_tables_vgw" = var.nat_gateway_custom_configuration.enable_vpn_gateway } : { - enable_nat_gateway = true - enable_dns_hostnames = true - single_nat_gateway = var.tfenv == "prod" ? false : true - one_nat_gateway_per_az = false - # reuse_nat_ips = true - # external_nat_ip_ids = [aws_eip.nat_gw_elastic_ip.id] + enable_nat_gateway = true + enable_dns_hostnames = true + single_nat_gateway = var.tfenv == "prod" ? false : true + one_nat_gateway_per_az = false + reuse_nat_ips = var.elastic_ip_custom_configuration.enabled ? var.elastic_ip_custom_configuration.reuse_nat_ips : false + external_nat_ip_ids = var.elastic_ip_custom_configuration.enabled ? 
var.elastic_ip_custom_configuration.external_nat_ip_ids : [] enable_vpn_gateway = false propagate_public_route_tables_vgw = false } + namespaces = concat( + var.custom_namespaces, + ["monitoring"], + (var.helm_installations.vault_consul ? ["hashicorp"] : []), + (var.helm_installations.argocd ? ["argocd"] : []) + ) } resource "random_integer" "cidr_vpc" { diff --git a/outputs.tf b/outputs.tf index 1f2fdb9..59508e6 100644 --- a/outputs.tf +++ b/outputs.tf @@ -2,9 +2,9 @@ ## MODULE: EKS ## ----------- -output "kubecfg" { - value = module.eks.kubeconfig -} +// output "kubecfg" { +// value = module.eks.kubeconfig +// } output "kubernetes-cluster-certificate-authority-data" { value = module.eks.cluster_certificate_authority_data } @@ -17,6 +17,11 @@ output "kubernetes-cluster-endpoint" { value = module.eks.cluster_endpoint } +output "kubernetes-cluster-auth" { + value = data.aws_eks_cluster_auth.cluster + sensitive = true +} + ## ----------- ## MODULE: VPC ## ----------- @@ -56,4 +61,19 @@ output "public_subnets_cidr_blocks" { output "base_cidr_block" { value = module.subnet_addrs.base_cidr_block -} \ No newline at end of file +} + +output "eks_managed_node_groups" { + value = module.eks.eks_managed_node_groups +} + +## ----------- +### Region and AWS Profile Checks +## ----------- +output "aws_region" { + value = var.aws_region +} + +output "aws_profile" { + value = var.aws_profile +} diff --git a/provider.tf b/provider.tf new file mode 100644 index 0000000..29f3807 --- /dev/null +++ b/provider.tf @@ -0,0 +1,9 @@ +provider "aws" { + region = var.aws_region + profile = var.aws_profile +} +provider "aws" { + alias = "secondary" + profile = var.aws_profile + region = var.aws_secondary_region +} \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/argocd.tf b/provisioning/kubernetes/argocd/argocd.tf new file mode 100644 index 0000000..e363bc7 --- /dev/null +++ b/provisioning/kubernetes/argocd/argocd.tf @@ -0,0 +1,41 @@ +resource "helm_release" "argocd" { 
+ name = "argocd" + repository = "https://argoproj.github.io/argo-helm" + chart = "argo-cd" + namespace = "argocd" + create_namespace = false + + ## Default values.yaml + configuration + ## https://github.com/argoproj/argo-helm/blob/master/charts/argo-cd/values.yaml + values = var.custom_manifest != null ? [file(var.custom_manifest.value_file)] : [< secret + if secret.secrets_store == "ssm" + } + + name = each.value.username +} + +data "aws_ssm_parameter" "infrastructure_credentials_password" { + for_each = { + for secret in var.repository_secrets : secret.password => secret + if secret.secrets_store == "ssm" + } + + name = each.value.password +} + +################################## +#### Credential Templates #### +################################## +data "aws_ssm_parameter" "infrastructure_credentials_repository_username" { + for_each = { + for secret in var.credential_templates : secret.username => secret + if secret.secrets_store == "ssm" + } + + name = each.value.username +} + +data "aws_ssm_parameter" "infrastructure_credentials_repository_password" { + for_each = { + for secret in var.credential_templates : secret.password => secret + if secret.secrets_store == "ssm" + } + + name = each.value.password +} + +################################## +#### Registry Secrets #### +################################## +data "aws_ssm_parameter" "infrastructure_credentials_registry_username" { + for_each = { + for secret in var.registry_secrets : secret.username => secret + if secret.secrets_store == "ssm" + } + + name = each.value.username +} + +data "aws_ssm_parameter" "infrastructure_credentials_registry_password" { + for_each = { + for secret in var.registry_secrets : secret.password => secret + if secret.secrets_store == "ssm" + } + + name = each.value.password +} + +data "aws_ssm_parameter" "infrastructure_credentials_registry_auth" { + for_each = { + for secret in var.registry_secrets : secret.auth => secret + if secret.secrets_store == "ssm" + } + + name = 
each.value.auth +} \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/locals.tf b/provisioning/kubernetes/argocd/locals.tf new file mode 100644 index 0000000..7d8469c --- /dev/null +++ b/provisioning/kubernetes/argocd/locals.tf @@ -0,0 +1,22 @@ +locals { + ## TODO: Secret assumed secrets_store is ssm + helmRepositoryYaml = { + apiVersion = "" + generated = "0001-01-01T00:00:00Z" + repositories = [ + for secret in var.repository_secrets : + { + caFile = "" + certFile = "" + insecure_skip_tls_verify = false + keyFile = "" + pass_credentials_all = false + name = secret.name + url = secret.url + username = secret.secrets_store == "ssm" ? data.aws_ssm_parameter.infrastructure_credentials_username[secret.username].value : "" + password = secret.secrets_store == "ssm" ? data.aws_ssm_parameter.infrastructure_credentials_password[secret.password].value : "" + } + if secret.type == "helm" + ] + } +} diff --git a/provisioning/kubernetes/argocd/registry-secrets.tf b/provisioning/kubernetes/argocd/registry-secrets.tf new file mode 100644 index 0000000..63a34d7 --- /dev/null +++ b/provisioning/kubernetes/argocd/registry-secrets.tf @@ -0,0 +1,26 @@ +resource "kubernetes_secret" "argocd_application_registry_secrets" { + count = length(var.registry_secrets) + + metadata { + name = "registry-${var.registry_secrets[count.index].name}" + namespace = "argocd" + labels = { + "argocd.argoproj.io/secret-type" = "docker-registry" + } + } + + data = { + ".dockerconfigjson" = base64encode(jsonencode({ + "auths" : { + "${var.registry_secrets[count.index].url}" : { + "username" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? var.registry_secrets[count.index].username : data.aws_ssm_parameter.infrastructure_credentials_registry_username[var.registry_secrets[count.index].username].value}", + "password" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? 
var.registry_secrets[count.index].password : data.aws_ssm_parameter.infrastructure_credentials_registry_password[var.registry_secrets[count.index].password].value}", + "email" : "${var.registry_secrets[count.index].email}", + "auth" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? var.registry_secrets[count.index].auth : data.aws_ssm_parameter.infrastructure_credentials_registry_auth[var.registry_secrets[count.index].auth].value}", + } + } + })) + } + + type = "kubernetes.io/dockerconfigjson" +} \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/repository-credential-templates.tf b/provisioning/kubernetes/argocd/repository-credential-templates.tf new file mode 100644 index 0000000..fde6c11 --- /dev/null +++ b/provisioning/kubernetes/argocd/repository-credential-templates.tf @@ -0,0 +1,17 @@ +resource "kubernetes_secret" "argocd_application_credential_template" { + count = length(var.credential_templates) + + metadata { + name = "argocd-repo-creds-${var.credential_templates[count.index].name}" + namespace = "argocd" + labels = { + "argocd.argoproj.io/secret-type" = "repo-creds" + } + } + + data = { + url = var.credential_templates[count.index].url + username = var.credential_templates[count.index].secrets_store != "ssm" ? var.credential_templates[count.index].username : data.aws_ssm_parameter.infrastructure_credentials_repository_username[var.credential_templates[count.index].username].value + password = var.credential_templates[count.index].secrets_store != "ssm" ? 
var.credential_templates[count.index].password : data.aws_ssm_parameter.infrastructure_credentials_repository_password[var.credential_templates[count.index].password].value + } +} diff --git a/provisioning/kubernetes/argocd/repository-secrets.tf b/provisioning/kubernetes/argocd/repository-secrets.tf new file mode 100644 index 0000000..50ebcae --- /dev/null +++ b/provisioning/kubernetes/argocd/repository-secrets.tf @@ -0,0 +1,32 @@ +resource "kubernetes_secret" "argocd_application_repository_secrets" { + count = length(var.repository_secrets) + + metadata { + name = "repository-${var.repository_secrets[count.index].name}" + namespace = "argocd" + labels = { + "argocd.argoproj.io/secret-type" = "repository" + } + } + + data = { + name = var.repository_secrets[count.index].name + url = var.repository_secrets[count.index].url + type = var.repository_secrets[count.index].type + username = var.repository_secrets[count.index].secrets_store != "ssm" ? var.repository_secrets[count.index].username : data.aws_ssm_parameter.infrastructure_credentials_username[var.repository_secrets[count.index].username].value + password = var.repository_secrets[count.index].secrets_store != "ssm" ? var.repository_secrets[count.index].password : data.aws_ssm_parameter.infrastructure_credentials_password[var.repository_secrets[count.index].password].value + } +} + +resource "kubernetes_secret" "argocd_helm_envsubst_plugin_repositories" { + count = coalesce(var.generate_plugin_repository_secret, false) ? 
1 : 0 + + metadata { + name = "argocd-helm-envsubst-plugin-repositories" + namespace = "argocd" + } + + data = { + "repositories.yaml" = yamlencode(local.helmRepositoryYaml) + } +} diff --git a/provisioning/kubernetes/argocd/variables.tf b/provisioning/kubernetes/argocd/variables.tf new file mode 100644 index 0000000..3a2b261 --- /dev/null +++ b/provisioning/kubernetes/argocd/variables.tf @@ -0,0 +1,10 @@ +variable "custom_manifest" { + default = null +} +variable "root_domain_name" {} +variable "hosted_zone_id" {} +variable "operator_domain_name" {} +variable "repository_secrets" {} +variable "credential_templates" {} +variable "registry_secrets" {} +variable "generate_plugin_repository_secret" {} \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/version.tf b/provisioning/kubernetes/argocd/version.tf new file mode 100644 index 0000000..17e83b3 --- /dev/null +++ b/provisioning/kubernetes/argocd/version.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 1.0" + required_providers { + kubectl = { + source = "gavinbunney/kubectl" + version = "~> 1.14.0" + } + } +} \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/README.md b/provisioning/kubernetes/aws-support/README.md deleted file mode 100644 index 9442778..0000000 --- a/provisioning/kubernetes/aws-support/README.md +++ /dev/null @@ -1,19 +0,0 @@ -#Post Provisioning EFS Service Account annotation - -Because the eks-efs-csi driver has a bug on its annotation and we cannot use kubernetes_service_account terraform resource to apply the annotation because the service account already exists and we cannot apply an update yet thru terraform. We need to manually apply the annotation to the service account that was created by the efs helm_release. To do that: - -1. First make sure you are in the right context, if the cluster already exists `kubectl config get-clusters` then copy the cluster to `kubectl config use-context `. 
This will connect you to the cluster you want to apply. If you have not added the context yet, you can do `aws eks update-kubeconfig --name --profile --region` to add the cluster on your kubeconfig. - -2. Open the file `provisioning/kubernetes/aws-support/efs-service-account-values.yaml` edit the file and change the following values: - -{account_id} matched with your AWS account ID -{var.app_name} matched with your app_name value set on terraform -{var.app_namespace} matched with the namespace used on the environment -{var.tfenv} matched with the tfenv - -Save the file. - -2. Apply the annotation from the values.yaml file by `kubectl apply -f provisioning/kubernetes/aws-support/efs-service-account-values.yaml` - - - diff --git a/provisioning/kubernetes/aws-support/efs-service-account-values.yaml b/provisioning/kubernetes/aws-support/efs-service-account-values.yaml deleted file mode 100644 index 4badb78..0000000 --- a/provisioning/kubernetes/aws-support/efs-service-account-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: efs-csi-controller-sa - namespace: kube-system - labels: - app.kubernetes.io/name: aws-efs-csi-driver - annotations: - eks.amazonaws.com/role-arn: arn:aws:iam:::role/AmazonEKS_EFS_CSI_DriverRole- \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/efs-serviceaccount.tf b/provisioning/kubernetes/aws-support/efs-serviceaccount.tf deleted file mode 100644 index d80b1f8..0000000 --- a/provisioning/kubernetes/aws-support/efs-serviceaccount.tf +++ /dev/null @@ -1,14 +0,0 @@ -#resource "kubernetes_service_account" "efs-csi-controller-service-account" { -# depends_on = [helm_release.aws-efs-csi-driver] -# metadata { -# name = "efs-csi-controller-sa" -# namespace = "kube-system" -# labels = { -# "app.kubernetes.io/name": "aws-efs-csi-driver" -# } -# annotations = { -# "eks.amazonaws.com/role-arn": 
"${data.aws_caller_identity.aws-support.account_id}:role/${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-EFS_CSI_Driver-role" -# } -# } -# automount_service_account_token = true -#} diff --git a/provisioning/kubernetes/aws-support/iam-policy.efs.tf b/provisioning/kubernetes/aws-support/iam-policy.efs.tf deleted file mode 100644 index 7d4383f..0000000 --- a/provisioning/kubernetes/aws-support/iam-policy.efs.tf +++ /dev/null @@ -1,74 +0,0 @@ -resource "aws_iam_policy" "amazoneks-efs-csi-driver-policy" { - name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-EFS_CSI_Driver-policy" - path = "/" - description = "EKS EFS CSI Driver policy for cluster ${var.app_name}-${var.app_namespace}-${var.tfenv}" - policy = data.aws_iam_policy_document.efs_csi_driver.json - tags = var.tags -} - -resource "aws_iam_role" "amazoneks-efs-csi-driver-role" { - name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-EFS_CSI_Driver-role" - assume_role_policy = data.aws_iam_policy_document.eks_efs_csi_driver_trust_policy.json - tags = var.tags -} - -resource "aws_iam_role_policy_attachment" "eks-efs-csi-driver-attachment" { - role = aws_iam_role.amazoneks-efs-csi-driver-role.name - policy_arn = aws_iam_policy.amazoneks-efs-csi-driver-policy.arn -} - -data "aws_iam_policy_document" "efs_csi_driver" { - statement { - effect = "Allow" - actions = [ - "elasticfilesystem:DescribeAccessPoints", - "elasticfilesystem:DescribeFileSystems" - ] - resources = ["*"] - } - - statement { - effect = "Allow" - actions = [ - "elasticfilesystem:CreateAccessPoint" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "aws:RequestTag/efs.csi.aws.com/cluster" - values = ["true"] - } - } - - statement { - effect = "Allow" - actions = [ - "elasticfilesystem:DeleteAccessPoint" - ] - resources = ["*"] - condition { - test = "StringEquals" - variable = "aws:ResourceTag/efs.csi.aws.com/cluster" - values = ["true"] - } - } -} - - -data "aws_iam_policy_document" 
"eks_efs_csi_driver_trust_policy" { - statement { - effect = "Allow" - principals { - type = "Federated" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.aws-support.account_id}:oidc-provider/oidc.eks.${var.aws_region}.amazonaws.com/id/${substr(var.oidc_url, -32, -1)}"] - } - actions = [ - "sts:AssumeRoleWithWebIdentity" - ] - condition { - test = "StringEquals" - variable = "oidc.eks.${var.aws_region}.amazonaws.com/id/${substr(var.oidc_url, -32, -1)}:sub" - values = ["system:serviceaccount:kube-system:efs-csi-controller-sa"] - } - } -} diff --git a/provisioning/kubernetes/aws-support/iam-policy.gp3.tf b/provisioning/kubernetes/aws-support/iam-policy.gp3.tf deleted file mode 100644 index 92768b5..0000000 --- a/provisioning/kubernetes/aws-support/iam-policy.gp3.tf +++ /dev/null @@ -1,191 +0,0 @@ -resource "aws_iam_policy" "amazoneks-ebs-csi-driver-policy" { - name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-ebs_csi_driver-policy" - path = "/" - description = "EKS ebs CSI Driver policy for cluster ${var.app_name}-${var.app_namespace}-${var.tfenv}" - policy = data.aws_iam_policy_document.ebs_csi_driver.json - tags = var.tags -} - -resource "aws_iam_role" "amazoneks-ebs-csi-driver-role" { - name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-AmazonEKS-ebs_csi_driver-role" - assume_role_policy = data.aws_iam_policy_document.eks_ebs_csi_driver_trust_policy.json - tags = var.tags -} - -resource "aws_iam_role_policy_attachment" "eks-ebs-csi-driver-attachment" { - role = aws_iam_role.amazoneks-ebs-csi-driver-role.name - policy_arn = aws_iam_policy.amazoneks-ebs-csi-driver-policy.arn -} - -data "aws_iam_policy_document" "ebs_csi_driver" { - statement { - effect = "Allow" - actions = [ - "ec2:CreateSnapshot", - "ec2:AttachVolume", - "ec2:DetachVolume", - "ec2:ModifyVolume", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeInstances", - "ec2:DescribeSnapshots", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - 
"ec2:DescribeVolumesModifications" - ] - resources = ["*"] - } - - statement { - effect = "Allow" - actions = [ - "ec2:CreateTags" - ] - resources = [ - "arn:aws:ec2:*:*:volume/*", - "arn:aws:ec2:*:*:snapshot/*" - ] - condition { - test = "StringEquals" - variable = "ec2:CreateAction" - values = [ - "CreateVolume", - "CreateSnapshot" - ] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:DeleteTags" - ] - resources = [ - "arn:aws:ec2:*:*:volume/*", - "arn:aws:ec2:*:*:snapshot/*" - ] - } - - statement { - effect = "Allow" - actions = [ - "ec2:CreateVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "aws:RequestTag/ebs.csi.aws.com/cluster" - values = ["true"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:CreateVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "aws:RequestTag/CSIVolumeName" - values = ["*"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:CreateVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "aws:RequestTag/kubernetes.io/cluster/*" - values = ["owned"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:DeleteVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "ec2:ResourceTag/ebs.csi.aws.com/cluster" - values = ["true"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:DeleteVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "ec2:ResourceTag/CSIVolumeName" - values = ["*"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:DeleteVolume" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "ec2:ResourceTag/kubernetes.io/cluster/*" - values = ["owned"] - } - } - - statement { - effect = "Allow" - actions = [ - "ec2:DeleteSnapshot" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "ec2:ResourceTag/CSIVolumeSnapshotName" - values = ["*"] - } - } - - statement { - effect = 
"Allow" - actions = [ - "ec2:DeleteSnapshot" - ] - resources = ["*"] - condition { - test = "StringLike" - variable = "ec2:ResourceTag/ebs.csi.aws.com/cluster" - values = ["true"] - } - } - - -} - -data "aws_iam_policy_document" "eks_ebs_csi_driver_trust_policy" { - statement { - effect = "Allow" - principals { - type = "Federated" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.aws-support.account_id}:oidc-provider/oidc.eks.${var.aws_region}.amazonaws.com/id/${substr(var.oidc_url, -32, -1)}"] - } - actions = [ - "sts:AssumeRoleWithWebIdentity" - ] - condition { - test = "StringEquals" - variable = "oidc.eks.${var.aws_region}.amazonaws.com/id/${substr(var.oidc_url, -32, -1)}:sub" - values = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"] - } - } -} diff --git a/provisioning/kubernetes/aws-support/src/autoscaler-iam-policy.json b/provisioning/kubernetes/aws-support/src/autoscaler-iam-policy.json deleted file mode 100644 index 6c76c87..0000000 --- a/provisioning/kubernetes/aws-support/src/autoscaler-iam-policy.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": "*", - "Effect": "Allow" - } - ] -} \ No newline at end of file diff --git a/provisioning/kubernetes/aws-support/src/ebs-storage-class.yml b/provisioning/kubernetes/aws-support/src/ebs-storage-class.yml deleted file mode 100644 index 52ef528..0000000 --- a/provisioning/kubernetes/aws-support/src/ebs-storage-class.yml +++ /dev/null @@ -1,6 +0,0 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ebs-sc -provisioner: ebs.csi.aws.com -volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git 
a/provisioning/kubernetes/aws-support/src/efs-storage-class.yml b/provisioning/kubernetes/aws-support/src/efs-storage-class.yml deleted file mode 100644 index da2890e..0000000 --- a/provisioning/kubernetes/aws-support/src/efs-storage-class.yml +++ /dev/null @@ -1,8 +0,0 @@ -# https://aws.amazon.com/premiumsupport/knowledge-center/eks-persistent-storage/ -# kubectl apply -k "github.com/kubernetes-sigs/aws-efs-csi-driver/deploy/kubernetes/overlays/stable/?ref=master" - -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: efs -provisioner: efs.csi.aws.com \ No newline at end of file diff --git a/provisioning/kubernetes/certmanager/certmanager.tf b/provisioning/kubernetes/certmanager/certmanager.tf index a30dd98..b471e84 100644 --- a/provisioning/kubernetes/certmanager/certmanager.tf +++ b/provisioning/kubernetes/certmanager/certmanager.tf @@ -2,13 +2,16 @@ resource "helm_release" "certmanager" { name = "cert-manager" repository = "https://charts.jetstack.io" chart = "cert-manager" - version = "v1.5.3" + version = "v1.9.1" namespace = "cert-manager" create_namespace = true - set { - name = "installCRDs" - value = true - } + values = var.custom_manifest != null ? 
[var.custom_manifest] : [< "filebeat-%%{[agent.version]}-%%{+yyyy.MM.dd}" } s3 { - access_key_id => ${module.iam_user.this_iam_access_key_id} - secret_access_key => "${module.iam_user.this_iam_access_key_secret}" + access_key_id => ${module.iam_user.iam_access_key_id} + secret_access_key => "${module.iam_user.iam_access_key_secret}" endpoint => "https://s3.${var.aws_region}.amazonaws.com" region => "${var.aws_region}" bucket => "${var.app_name}-${var.app_namespace}-${var.tfenv}-elasticstack-logs" diff --git a/provisioning/kubernetes/elastic-stack/s3.tf b/provisioning/kubernetes/elastic-stack/s3.tf index 6b1bb1e..6884244 100644 --- a/provisioning/kubernetes/elastic-stack/s3.tf +++ b/provisioning/kubernetes/elastic-stack/s3.tf @@ -6,7 +6,7 @@ resource "aws_kms_key" "eks_logging" { module "log_bucket" { source = "terraform-aws-modules/s3-bucket/aws" - version = "~> 2.6" + version = "~> 3.2" # create_bucket = var.tfenv == "prod" ? true : false @@ -34,7 +34,7 @@ module "log_bucket" { module "s3_elasticstack_bucket" { source = "terraform-aws-modules/s3-bucket/aws" - version = "~> 2.6" + version = "~> 3.2" # create_bucket = var.tfenv == "prod" ? 
true : false @@ -106,7 +106,7 @@ module "s3_elasticstack_bucket" { ### IAM USER DEFINITION module "iam_user" { source = "terraform-aws-modules/iam/aws//modules/iam-user" - version = "~> 3.0" + version = "~> 4.24" name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-elasticstack-s3-user" path = "/serviceaccounts/${var.app_name}/${var.app_namespace}/" @@ -235,7 +235,7 @@ data "aws_iam_policy_document" "s3_policy_data" { } resource "aws_iam_user_policy_attachment" "s3_attach" { - user = module.iam_user.this_iam_user_name + user = module.iam_user.iam_user_name policy_arn = aws_iam_policy.s3_access_policy.arn } @@ -263,6 +263,6 @@ data "aws_iam_policy_document" "kms_policy_data" { } resource "aws_iam_user_policy_attachment" "kms_attach" { - user = module.iam_user.this_iam_user_name + user = module.iam_user.iam_user_name policy_arn = aws_iam_policy.kms_access_policy.arn } diff --git a/provisioning/kubernetes/grafana/grafana.tf b/provisioning/kubernetes/grafana/grafana.tf index 52a31f9..fb1f69d 100644 --- a/provisioning/kubernetes/grafana/grafana.tf +++ b/provisioning/kubernetes/grafana/grafana.tf @@ -4,7 +4,7 @@ resource "helm_release" "grafana" { chart = "grafana" namespace = "monitoring" - values = [< Date: Tue, 16 Aug 2022 01:30:25 +0800 Subject: [PATCH 02/11] Minor updates --- README.md | 9 ++++--- outputs.tf | 6 ++--- variables.tf | 60 +++++++++++++++++++++++++++++++++---------- versions.tf | 2 +- vpc_infrastructure.tf | 15 ++++++++--- 5 files changed, 68 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 590c90e..e699f1b 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ MIT Licensed. 
See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.1 | | [aws](#requirement\_aws) | ~> 4.5 | | [gitlab](#requirement\_gitlab) | ~> 3.4 | | [helm](#requirement\_helm) | ~> 2.0 | @@ -175,15 +175,17 @@ MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [default\_capacity\_type](#input\_default\_capacity\_type) | Default capacity configuraiton used for node provisioning. Valid values: `ON_DEMAND, SPOT` | `string` | `"ON_DEMAND"` | no | | [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Override default 'single nodegroup, on a private subnet' with more advaned configuration archetypes | `any` | `[]` | no | | [elastic\_ip\_custom\_configuration](#input\_elastic\_ip\_custom\_configuration) | By default, this module will provision new Elastic IPs for the VPC's NAT Gateways; however, one can also override and specify separate, pre-existing elastic IPs as needed in order to preserve IPs that are whitelisted; reminder that the list of EIPs should have the same count as nat gateways created. |
object({
enabled = bool
reuse_nat_ips = optional(bool)
external_nat_ip_ids = optional(list(string))
})
|
{
"enabled": false,
"external_nat_ip_ids": [],
"reuse_nat_ips": false
}
| no | +| [gitlab\_kubernetes\_agent\_config](#input\_gitlab\_kubernetes\_agent\_config) | Configuration for Gitlab Kubernetes Agent |
object({
gitlab_agent_url = string
gitlab_agent_secret = string
})
|
{
"gitlab_agent_secret": "",
"gitlab_agent_url": ""
}
| no | | [google\_authDomain](#input\_google\_authDomain) | Used for Infrastructure OAuth: Google Auth Domain | `any` | n/a | yes | | [google\_clientID](#input\_google\_clientID) | Used for Infrastructure OAuth: Google Auth Client ID | `any` | n/a | yes | | [google\_clientSecret](#input\_google\_clientSecret) | Used for Infrastructure OAuth: Google Auth Client Secret | `any` | n/a | yes | | [helm\_configurations](#input\_helm\_configurations) | n/a |
object({
dashboard = optional(string)
gitlab_runner = optional(string)
vault_consul = optional(object({
consul_values = optional(string)
vault_values = optional(string)
enable_aws_vault_unseal = optional(bool) # If Vault is enabled and deployed, by default, the unseal process is manual; Changing this to true allows for automatic unseal using AWS KMS"
vault_nodeselector = optional(string) # Allow for vault node selectors without extensive reconfiguration of the standard values file
vault_tolerations = optional(string) # Allow for tolerating certain taint on nodes, example usage, string:'NoExecute:we_love_hashicorp:true'
}))
ingress = optional(object({
nginx_values = optional(string)
certmanager_values = optional(string)
}))
elasticstack = optional(string)
grafana = optional(string)
argocd = optional(object({
value_file = optional(string)
application_set = optional(list(string))
repository_secrets = optional(list(object({
name = string
url = string
type = string
username = string
password = string
secrets_store = string
})))
credential_templates = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
})))
registry_secrets = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
auth = string
email = string
})))
generate_plugin_repository_secret = optional(bool)
}))
})
|
{
"argocd": null,
"dashboard": null,
"elasticstack": null,
"gitlab_runner": null,
"grafana": null,
"ingress": null,
"vault_consul": null
}
| no | -| [helm\_installations](#input\_helm\_installations) | n/a |
object({
dashboard = bool
gitlab_runner = bool
vault_consul = bool
ingress = bool
elasticstack = bool
grafana = bool
argocd = bool
})
|
{
"argocd": false,
"dashboard": true,
"elasticstack": false,
"gitlab_runner": false,
"grafana": true,
"ingress": true,
"vault_consul": true
}
| no | +| [helm\_installations](#input\_helm\_installations) | n/a |
object({
dashboard = bool
gitlab_runner = bool
gitlab_k8s_agent = bool
vault_consul = bool
ingress = bool
elasticstack = bool
grafana = bool
argocd = bool
stakater_reloader = bool
metrics_server = bool
})
|
{
"argocd": false,
"dashboard": true,
"elasticstack": false,
"gitlab_k8s_agent": false,
"gitlab_runner": false,
"grafana": true,
"ingress": true,
"metrics_server": true,
"stakater_reloader": false,
"vault_consul": true
}
| no | | [instance\_desired\_size](#input\_instance\_desired\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `2` | no | | [instance\_max\_size](#input\_instance\_max\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `4` | no | | [instance\_min\_size](#input\_instance\_min\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `1` | no | | [instance\_type](#input\_instance\_type) | AWS Instance Type for provisioning | `string` | `"c5a.medium"` | no | +| [ipv6](#input\_ipv6) | n/a |
object({
enable = bool
assign_ipv6_address_on_creation = bool
private_subnet_assign_ipv6_address_on_creation = bool
public_subnet_assign_ipv6_address_on_creation = bool
})
|
{
"assign_ipv6_address_on_creation": true,
"enable": false,
"private_subnet_assign_ipv6_address_on_creation": true,
"public_subnet_assign_ipv6_address_on_creation": true
}
| no | | [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no | | [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | | [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | @@ -206,8 +208,9 @@ MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [aws\_region](#output\_aws\_region) | # ----------- ## Region and AWS Profile Checks # ----------- | | [base\_cidr\_block](#output\_base\_cidr\_block) | n/a | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | n/a | +| [kubecfg](#output\_kubecfg) | n/a | | [kubernetes-cluster-auth](#output\_kubernetes-cluster-auth) | n/a | -| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | output "kubecfg" { value = module.eks.kubeconfig } | +| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | n/a | | [kubernetes-cluster-endpoint](#output\_kubernetes-cluster-endpoint) | n/a | | [kubernetes-cluster-id](#output\_kubernetes-cluster-id) | n/a | | [private\_route\_table\_ids](#output\_private\_route\_table\_ids) | n/a | diff --git a/outputs.tf b/outputs.tf index 59508e6..8d7e834 100644 --- a/outputs.tf +++ b/outputs.tf @@ -2,9 +2,9 @@ ## MODULE: EKS ## ----------- -// output "kubecfg" { -// value = module.eks.kubeconfig -// } +output "kubecfg" { + value = module.eks.kubeconfig +} output "kubernetes-cluster-certificate-authority-data" { value = module.eks.cluster_certificate_authority_data } diff --git a/variables.tf b/variables.tf index 6dada6e..02bad95 100644 --- a/variables.tf +++ b/variables.tf @@ -292,22 +292,28 @@ variable "aws_installations" { variable "helm_installations" { type = object({ - dashboard = bool - gitlab_runner = bool - vault_consul = bool - ingress = bool - elasticstack = bool - grafana = bool - argocd = bool + dashboard = bool + gitlab_runner = bool + gitlab_k8s_agent = bool + vault_consul = bool + ingress = bool + elasticstack = bool + grafana = bool + argocd = bool + stakater_reloader = bool + metrics_server = bool }) default = { - dashboard = true - gitlab_runner = false - vault_consul = true - ingress = true - 
elasticstack = false - grafana = true - argocd = false + dashboard = true + gitlab_runner = false + gitlab_k8s_agent = false + vault_consul = true + ingress = true + elasticstack = false + grafana = true + argocd = false + stakater_reloader = false + metrics_server = true } } variable "helm_configurations" { @@ -436,6 +442,18 @@ variable "default_ami_type" { default = "AL2_x86_64" } +variable "gitlab_kubernetes_agent_config" { + description = "Configuration for Gitlab Kubernetes Agent" + type = object({ + gitlab_agent_url = string + gitlab_agent_secret = string + }) + sensitive = true + default = { + gitlab_agent_url = "" + gitlab_agent_secret = "" + } +} variable "default_capacity_type" { description = "Default capacity configuraiton used for node provisioning. Valid values: `ON_DEMAND, SPOT`" default = "ON_DEMAND" @@ -455,3 +473,17 @@ variable "registry_credentials" { default = [] } +variable "ipv6" { + type = object({ + enable = bool + assign_ipv6_address_on_creation = bool + private_subnet_assign_ipv6_address_on_creation = bool + public_subnet_assign_ipv6_address_on_creation = bool + }) + default = { + enable = false + assign_ipv6_address_on_creation = true + private_subnet_assign_ipv6_address_on_creation = true + public_subnet_assign_ipv6_address_on_creation = true + } +} diff --git a/versions.tf b/versions.tf index efd2c2c..1d6a12c 100644 --- a/versions.tf +++ b/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.1" experiments = [module_variable_optional_attrs] required_providers { diff --git a/vpc_infrastructure.tf b/vpc_infrastructure.tf index 2fbdc17..2efd3c5 100755 --- a/vpc_infrastructure.tf +++ b/vpc_infrastructure.tf @@ -39,7 +39,7 @@ module "eks-vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 3.14" - name = "eks-${var.app_namespace}-${var.tfenv}-cluster-vpc" + name = "${app_name}-${var.app_namespace}-${var.tfenv}-cluster-vpc" cidr = module.subnet_addrs.base_cidr_block azs = 
data.aws_availability_zones.available_azs.names # TODO: Modularise these arrays: https://gitlab.com/nicosingh/medium-deploy-eks-cluster-using-terraform/-/blob/master/network.tf @@ -79,6 +79,15 @@ module "eks-vpc" { create_flow_log_cloudwatch_iam_role = coalesce(var.vpc_flow_logs.enabled, var.tfenv == "prod" ? true : false) flow_log_max_aggregation_interval = 60 + #IPv6 section + enable_ipv6 = var.ipv6.enable + assign_ipv6_address_on_creation = var.ipv6.assign_ipv6_address_on_creation + private_subnet_assign_ipv6_address_on_creation = var.ipv6.private_subnet_assign_ipv6_address_on_creation + public_subnet_assign_ipv6_address_on_creation = var.ipv6.public_subnet_assign_ipv6_address_on_creation + + public_subnet_ipv6_prefixes = [0, 1, 2] + private_subnet_ipv6_prefixes = [3, 4, 5] + tags = merge({ "kubernetes.io/cluster/${local.name_prefix}" = "shared" }, local.base_tags) @@ -124,8 +133,8 @@ module "eks-vpc-endpoints" { resource "aws_vpc_endpoint" "rds" { lifecycle { ignore_changes = [dns_entry] } - vpc_id = module.eks-vpc.vpc_id - + vpc_id = module.eks-vpc.vpc_id + depends_on = [module.eks-vpc] service_name = "com.amazonaws.${var.aws_region}.rds" vpc_endpoint_type = "Interface" private_dns_enabled = true From 2d9646b1cadf790a8fd927e4232fe6f94fc798c3 Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Tue, 16 Aug 2022 01:33:02 +0800 Subject: [PATCH 03/11] Missing helm installations --- kubernetes-helm.tf | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/kubernetes-helm.tf b/kubernetes-helm.tf index e785d46..a2722a1 100644 --- a/kubernetes-helm.tf +++ b/kubernetes-helm.tf @@ -67,6 +67,57 @@ module "vault" { custom_manifest = var.helm_configurations.vault_consul } +module "vault-secrets-webhook" { + source = "./provisioning/kubernetes/bonzai-vault-secrets-webhook" + depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + count = 
var.helm_installations.vault_consul ? 1 : 0 + + vault_nodeselector = var.vault_nodeselector + vault_tolerations = var.vault_tolerations + app_namespace = var.app_namespace + tfenv = var.tfenv +} + +module "vault-operator" { + source = "./provisioning/kubernetes/bonzai-vault-operator" + depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + count = var.helm_installations.vault_consul ? 1 : 0 + + vault_nodeselector = var.vault_nodeselector + vault_tolerations = var.vault_tolerations + app_namespace = var.app_namespace + tfenv = var.tfenv +} + +module "stakater-reloader" { + source = "./provisioning/kubernetes/stakater-reloader" + depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + count = var.helm_installations.stakater_reloader ? 1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv +} + +module "metrics-server" { + source = "./provisioning/kubernetes/metrics-server" + depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + count = var.helm_installations.metrics_server ? 1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv +} + +module "gitlab-k8s-agent" { + source = "./provisioning/kubernetes/gitlab-kubernetes-agent" + depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces] + count = var.helm_installations.gitlab_k8s_agent ? 
1 : 0 + + app_namespace = var.app_namespace + tfenv = var.tfenv + gitlab_agent_url = var.gitlab_kubernetes_agent_config.gitlab_agent_url + gitlab_agent_secret = var.gitlab_kubernetes_agent_config.gitlab_agent_secret +} + module "elastic-stack" { source = "./provisioning/kubernetes/elastic-stack" depends_on = [module.eks] From c780589df8b940a719d29241ec49d3b15216271b Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Tue, 16 Aug 2022 23:40:31 +0800 Subject: [PATCH 04/11] Still testing v3.0.0 --- README.md | 10 +++++++--- VERSION | 2 +- kubernetes-helm.tf | 20 +++++++++---------- outputs.tf | 4 ---- .../gitlab-kubernetes-agent/gitlab-agent.tf | 9 ++------- .../gitlab-kubernetes-agent/variables.tf | 8 ++++++++ variables.tf | 6 +++--- vpc_infrastructure.tf | 2 +- 8 files changed, 32 insertions(+), 29 deletions(-) create mode 100644 provisioning/kubernetes/gitlab-kubernetes-agent/variables.tf diff --git a/README.md b/README.md index e699f1b..d06256a 100644 --- a/README.md +++ b/README.md @@ -122,11 +122,16 @@ MIT Licensed. 
See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [eks-vpc-endpoints](#module\_eks-vpc-endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | ~> 3.14 | | [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | ~> 18.23.0 | | [elastic-stack](#module\_elastic-stack) | ./provisioning/kubernetes/elastic-stack | n/a | +| [gitlab-k8s-agent](#module\_gitlab-k8s-agent) | ./provisioning/kubernetes/gitlab-kubernetes-agent | n/a | | [grafana](#module\_grafana) | ./provisioning/kubernetes/grafana | n/a | | [kubernetes-dashboard](#module\_kubernetes-dashboard) | ./provisioning/kubernetes/kubernetes-dashboard | n/a | +| [metrics-server](#module\_metrics-server) | ./provisioning/kubernetes/metrics-server | n/a | | [nginx-controller-ingress](#module\_nginx-controller-ingress) | ./provisioning/kubernetes/nginx-controller | n/a | +| [stakater-reloader](#module\_stakater-reloader) | ./provisioning/kubernetes/stakater-reloader | n/a | | [subnet\_addrs](#module\_subnet\_addrs) | hashicorp/subnets/cidr | 1.0.0 | | [vault](#module\_vault) | ./provisioning/kubernetes/hashicorp-vault | n/a | +| [vault-operator](#module\_vault-operator) | ./provisioning/kubernetes/bonzai-vault-operator | n/a | +| [vault-secrets-webhook](#module\_vault-secrets-webhook) | ./provisioning/kubernetes/bonzai-vault-secrets-webhook | n/a | ## Resources @@ -185,7 +190,7 @@ MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [instance\_max\_size](#input\_instance\_max\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `4` | no | | [instance\_min\_size](#input\_instance\_min\_size) | Count of instances to be spun up within the context of a kubernetes cluster. 
Minimum: 2 | `number` | `1` | no | | [instance\_type](#input\_instance\_type) | AWS Instance Type for provisioning | `string` | `"c5a.medium"` | no | -| [ipv6](#input\_ipv6) | n/a |
object({
enable = bool
assign_ipv6_address_on_creation = bool
private_subnet_assign_ipv6_address_on_creation = bool
public_subnet_assign_ipv6_address_on_creation = bool
})
|
{
"assign_ipv6_address_on_creation": true,
"enable": false,
"private_subnet_assign_ipv6_address_on_creation": true,
"public_subnet_assign_ipv6_address_on_creation": true
}
| no | +| [ipv6](#input\_ipv6) | n/a |
object({
enable = bool
assign_ipv6_address_on_creation = bool
private_subnet_assign_ipv6_address_on_creation = bool
public_subnet_assign_ipv6_address_on_creation = bool
})
|
{
"assign_ipv6_address_on_creation": false,
"enable": false,
"private_subnet_assign_ipv6_address_on_creation": false,
"public_subnet_assign_ipv6_address_on_creation": false
}
| no | | [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no | | [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | | [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | @@ -208,9 +213,8 @@ MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [aws\_region](#output\_aws\_region) | # ----------- ## Region and AWS Profile Checks # ----------- | | [base\_cidr\_block](#output\_base\_cidr\_block) | n/a | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | n/a | -| [kubecfg](#output\_kubecfg) | n/a | | [kubernetes-cluster-auth](#output\_kubernetes-cluster-auth) | n/a | -| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | n/a | +| [kubernetes-cluster-certificate-authority-data](#output\_kubernetes-cluster-certificate-authority-data) | # ----------- # MODULE: EKS # ----------- | | [kubernetes-cluster-endpoint](#output\_kubernetes-cluster-endpoint) | n/a | | [kubernetes-cluster-id](#output\_kubernetes-cluster-id) | n/a | | [private\_route\_table\_ids](#output\_private\_route\_table\_ids) | n/a | diff --git a/VERSION b/VERSION index 56fea8a..4a36342 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0 \ No newline at end of file +3.0.0 diff --git a/kubernetes-helm.tf b/kubernetes-helm.tf index a2722a1..8b66184 100644 --- a/kubernetes-helm.tf +++ b/kubernetes-helm.tf @@ -69,29 +69,29 @@ module "vault" { module "vault-secrets-webhook" { source = "./provisioning/kubernetes/bonzai-vault-secrets-webhook" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + depends_on = [module.eks, module.vault] count = var.helm_installations.vault_consul ? 1 : 0 - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations + vault_nodeselector = try(var.helm_configurations.vault_consul.vault_nodeselector, "") != null ? var.helm_configurations.vault_consul.vault_nodeselector : "" + vault_tolerations = try(var.helm_configurations.vault_consul.vault_tolerations, "") != null ? 
var.helm_configurations.vault_consul.vault_tolerations : "" app_namespace = var.app_namespace tfenv = var.tfenv } module "vault-operator" { source = "./provisioning/kubernetes/bonzai-vault-operator" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + depends_on = [module.eks, module.vault] count = var.helm_installations.vault_consul ? 1 : 0 - vault_nodeselector = var.vault_nodeselector - vault_tolerations = var.vault_tolerations + vault_nodeselector = try(var.helm_configurations.vault_consul.vault_nodeselector, "") != null ? var.helm_configurations.vault_consul.vault_nodeselector : "" + vault_tolerations = try(var.helm_configurations.vault_consul.vault_tolerations, "") != null ? var.helm_configurations.vault_consul.vault_tolerations : "" app_namespace = var.app_namespace tfenv = var.tfenv } module "stakater-reloader" { source = "./provisioning/kubernetes/stakater-reloader" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + depends_on = [module.eks] count = var.helm_installations.stakater_reloader ? 1 : 0 app_namespace = var.app_namespace @@ -100,7 +100,7 @@ module "stakater-reloader" { module "metrics-server" { source = "./provisioning/kubernetes/metrics-server" - depends_on = [module.eks-vpc, module.eks, aws_eks_node_group.custom_node_group, module.namespaces, module.nginx-controller-ingress, module.certmanager] + depends_on = [module.eks] count = var.helm_installations.metrics_server ? 1 : 0 app_namespace = var.app_namespace @@ -109,7 +109,7 @@ module "metrics-server" { module "gitlab-k8s-agent" { source = "./provisioning/kubernetes/gitlab-kubernetes-agent" - depends_on = [module.eks, aws_eks_node_group.custom_node_group, module.namespaces] + depends_on = [module.eks] count = var.helm_installations.gitlab_k8s_agent ? 
1 : 0 app_namespace = var.app_namespace @@ -167,7 +167,7 @@ module "argocd" { # module "gitlab_runner" { # source = "./provisioning/kubernetes/gitlab-runner" -# depends_on = [module.namespaces, module.eks-vpc] +# depends_on = module.eks-vpc] # count = var.helm_installations.gitlab_runner ? 1 : 0 # app_name = var.app_name diff --git a/outputs.tf b/outputs.tf index 8d7e834..2d47f1f 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,10 +1,6 @@ ## ----------- ## MODULE: EKS ## ----------- - -output "kubecfg" { - value = module.eks.kubeconfig -} output "kubernetes-cluster-certificate-authority-data" { value = module.eks.cluster_certificate_authority_data } diff --git a/provisioning/kubernetes/gitlab-kubernetes-agent/gitlab-agent.tf b/provisioning/kubernetes/gitlab-kubernetes-agent/gitlab-agent.tf index cdaa57e..503f516 100644 --- a/provisioning/kubernetes/gitlab-kubernetes-agent/gitlab-agent.tf +++ b/provisioning/kubernetes/gitlab-kubernetes-agent/gitlab-agent.tf @@ -2,7 +2,7 @@ resource "helm_release" "gitlab-k8s-agent" { name = "gitlab-kubernetes-agent-${var.app_namespace}-${var.tfenv}" repository = "https://charts.gitlab.io" chart = "gitlab-agent" - version = "v0.6.1" + version = var.chart_version namespace = "gitlab-agent" create_namespace = true @@ -14,9 +14,4 @@ resource "helm_release" "gitlab-k8s-agent" { secretName: "gitlab-agent-token" EOF ] -} - -variable "app_namespace" {} -variable "tfenv" {} -variable "gitlab_agent_url" {} -variable "gitlab_agent_secret" {} \ No newline at end of file +} \ No newline at end of file diff --git a/provisioning/kubernetes/gitlab-kubernetes-agent/variables.tf b/provisioning/kubernetes/gitlab-kubernetes-agent/variables.tf new file mode 100644 index 0000000..efa0b59 --- /dev/null +++ b/provisioning/kubernetes/gitlab-kubernetes-agent/variables.tf @@ -0,0 +1,8 @@ +variable "app_namespace" {} +variable "tfenv" {} +variable "gitlab_agent_url" {} +variable "gitlab_agent_secret" {} +variable "chart_version" { + type = string + default = 
"v1.4.0" +} \ No newline at end of file diff --git a/variables.tf b/variables.tf index 02bad95..f81baf7 100644 --- a/variables.tf +++ b/variables.tf @@ -482,8 +482,8 @@ variable "ipv6" { }) default = { enable = false - assign_ipv6_address_on_creation = true - private_subnet_assign_ipv6_address_on_creation = true - public_subnet_assign_ipv6_address_on_creation = true + assign_ipv6_address_on_creation = false + private_subnet_assign_ipv6_address_on_creation = false + public_subnet_assign_ipv6_address_on_creation = false } } diff --git a/vpc_infrastructure.tf b/vpc_infrastructure.tf index 2efd3c5..e0bdee8 100755 --- a/vpc_infrastructure.tf +++ b/vpc_infrastructure.tf @@ -39,7 +39,7 @@ module "eks-vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 3.14" - name = "${app_name}-${var.app_namespace}-${var.tfenv}-cluster-vpc" + name = "${var.app_name}-${var.app_namespace}-${var.tfenv}-cluster-vpc" cidr = module.subnet_addrs.base_cidr_block azs = data.aws_availability_zones.available_azs.names # TODO: Modularise these arrays: https://gitlab.com/nicosingh/medium-deploy-eks-cluster-using-terraform/-/blob/master/network.tf From c7b3baf83b5d84f21b05ca3d7df4d3b6efb42d4e Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Tue, 16 Aug 2022 23:51:43 +0800 Subject: [PATCH 05/11] Add role cm --- cluster-aws-auth-cm.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster-aws-auth-cm.tf b/cluster-aws-auth-cm.tf index 92ca7fc..210b9e6 100644 --- a/cluster-aws-auth-cm.tf +++ b/cluster-aws-auth-cm.tf @@ -9,7 +9,7 @@ resource "kubectl_manifest" "aws-auth" { "data" = { "mapUsers" = yamlencode(var.map_users) "mapAccounts" = yamlencode(var.map_accounts) - "mapRoles" = yamlencode(local.aws_auth_roles) + "mapRoles" = yamlencode(concat(local.aws_auth_roles, var.map_roles)) } }) } \ No newline at end of file From 5b5ba9472964bb79624b6117c94610700f1032dd Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Thu, 18 Aug 2022 13:43:54 +0800 Subject: [PATCH 06/11] Updating 
cluster nodegroups to use for_each --- cluster-nodegroups.tf | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/cluster-nodegroups.tf b/cluster-nodegroups.tf index 9493654..0f93644 100644 --- a/cluster-nodegroups.tf +++ b/cluster-nodegroups.tf @@ -2,59 +2,61 @@ module "eks_managed_node_group" { source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group" version = "~> 18.23.0" - count = length(var.eks_managed_node_groups) + for_each = { + for node_group in var.eks_managed_node_groups : node_group.name => node_group + } - name = var.eks_managed_node_groups[count.index].name + name = each.value.name use_name_prefix = false cluster_name = module.eks.cluster_id cluster_version = var.cluster_version create_iam_role = true - iam_role_name = "${module.eks.cluster_id}-${var.eks_managed_node_groups[count.index].name}" + iam_role_name = "${module.eks.cluster_id}-${each.value.name}" iam_role_attach_cni_policy = true iam_role_use_name_prefix = false - launch_template_name = "${module.eks.cluster_id}-${var.eks_managed_node_groups[count.index].name}" + launch_template_name = "${module.eks.cluster_id}-${each.value.name}" launch_template_use_name_prefix = false # iam_role_arn = module.eks.eks_managed_node_groups.iam_role.arn # cluster_ip_family = "ipv6" # NOT READY vpc_id = module.eks-vpc.vpc_id subnet_ids = concat( - var.eks_managed_node_groups[count.index].subnet_selections.public ? module.eks-vpc.public_subnets : [], - var.eks_managed_node_groups[count.index].subnet_selections.private ? module.eks-vpc.private_subnets : [] + each.value.subnet_selections.public ? module.eks-vpc.public_subnets : [], + each.value.subnet_selections.private ? 
module.eks-vpc.private_subnets : [] ) cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id # vpc_security_group_ids = [module.eks.node_security_group_id] create_security_group = false - desired_size = var.eks_managed_node_groups[count.index].desired_capacity - max_size = var.eks_managed_node_groups[count.index].max_capacity - min_size = var.eks_managed_node_groups[count.index].min_capacity + desired_size = each.value.desired_capacity + max_size = each.value.max_capacity + min_size = each.value.min_capacity - instance_types = var.eks_managed_node_groups[count.index].instance_types - ami_type = var.eks_managed_node_groups[count.index].ami_type != null ? var.eks_managed_node_groups[count.index].ami_type : var.default_ami_type - capacity_type = var.eks_managed_node_groups[count.index].capacity_type != null ? var.eks_managed_node_groups[count.index].capacity_type : var.default_capacity_type + instance_types = each.value.instance_types + ami_type = each.value.ami_type != null ? each.value.ami_type : var.default_ami_type + capacity_type = each.value.capacity_type != null ? 
each.value.capacity_type : var.default_capacity_type - disk_size = var.eks_managed_node_groups[count.index].disk_size + disk_size = each.value.disk_size ebs_optimized = true labels = merge( { Environment = var.tfenv }, zipmap( [ - for x in var.eks_managed_node_groups[count.index].taints : x.key + for x in each.value.taints : x.key if x.affinity_label ], [ - for x in var.eks_managed_node_groups[count.index].taints : x.value + for x in each.value.taints : x.value if x.affinity_label ] ) ) taints = { - for taint in var.eks_managed_node_groups[count.index].taints : taint.key => { + for taint in each.value.taints : taint.key => { key = taint.key value = taint.value effect = taint.effect @@ -64,7 +66,7 @@ module "eks_managed_node_group" { tags = merge( local.kubernetes_tags, - { "Name" : var.eks_managed_node_groups[count.index].name } - # var.eks_managed_node_groups[count.index][count.index].tags != null ? var.eks_managed_node_groups[count.index][count.index].tags : [] + { "Name" : each.value.name } + # each.value[count.index].tags != null ? each.value[count.index].tags : [] ) } \ No newline at end of file From 3919d369e783a5c9080fabe9987df3076b006400 Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Sat, 10 Sep 2022 18:22:59 +0800 Subject: [PATCH 07/11] Minor updates --- cluster-nodegroups.tf | 1 - provisioning/kubernetes/argocd/data.tf | 13 ++--------- .../kubernetes/argocd/registry-secrets.tf | 23 ++++++++----------- variables.tf | 5 ++-- 4 files changed, 14 insertions(+), 28 deletions(-) diff --git a/cluster-nodegroups.tf b/cluster-nodegroups.tf index 0f93644..12750f6 100644 --- a/cluster-nodegroups.tf +++ b/cluster-nodegroups.tf @@ -67,6 +67,5 @@ module "eks_managed_node_group" { tags = merge( local.kubernetes_tags, { "Name" : each.value.name } - # each.value[count.index].tags != null ? 
each.value[count.index].tags : [] ) } \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/data.tf b/provisioning/kubernetes/argocd/data.tf index 0ed00f5..480331b 100644 --- a/provisioning/kubernetes/argocd/data.tf +++ b/provisioning/kubernetes/argocd/data.tf @@ -45,7 +45,7 @@ data "aws_ssm_parameter" "infrastructure_credentials_repository_password" { ################################## data "aws_ssm_parameter" "infrastructure_credentials_registry_username" { for_each = { - for secret in var.registry_secrets : secret.username => secret + for secret in coalesce(var.registry_secrets, []) : secret.username => secret if secret.secrets_store == "ssm" } @@ -54,18 +54,9 @@ data "aws_ssm_parameter" "infrastructure_credentials_registry_username" { data "aws_ssm_parameter" "infrastructure_credentials_registry_password" { for_each = { - for secret in var.registry_secrets : secret.password => secret + for secret in coalesce(var.registry_secrets, []) : secret.password => secret if secret.secrets_store == "ssm" } name = each.value.password } - -data "aws_ssm_parameter" "infrastructure_credentials_registry_auth" { - for_each = { - for secret in var.registry_secrets : secret.auth => secret - if secret.secrets_store == "ssm" - } - - name = each.value.auth -} \ No newline at end of file diff --git a/provisioning/kubernetes/argocd/registry-secrets.tf b/provisioning/kubernetes/argocd/registry-secrets.tf index 63a34d7..3e42306 100644 --- a/provisioning/kubernetes/argocd/registry-secrets.tf +++ b/provisioning/kubernetes/argocd/registry-secrets.tf @@ -1,22 +1,19 @@ -resource "kubernetes_secret" "argocd_application_registry_secrets" { - count = length(var.registry_secrets) +resource "kubernetes_secret" "regcred" { + for_each = { for regcred in coalesce(var.registry_secrets, []) : "${regcred.name}-argocd" => regcred } metadata { - name = "registry-${var.registry_secrets[count.index].name}" + name = "registry-${each.value.name}" namespace = "argocd" - labels = { - 
"argocd.argoproj.io/secret-type" = "docker-registry" - } } data = { - ".dockerconfigjson" = base64encode(jsonencode({ - "auths" : { - "${var.registry_secrets[count.index].url}" : { - "username" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? var.registry_secrets[count.index].username : data.aws_ssm_parameter.infrastructure_credentials_registry_username[var.registry_secrets[count.index].username].value}", - "password" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? var.registry_secrets[count.index].password : data.aws_ssm_parameter.infrastructure_credentials_registry_password[var.registry_secrets[count.index].password].value}", - "email" : "${var.registry_secrets[count.index].email}", - "auth" : "${var.registry_secrets[count.index].secrets_store != "ssm" ? var.registry_secrets[count.index].auth : data.aws_ssm_parameter.infrastructure_credentials_registry_auth[var.registry_secrets[count.index].auth].value}", + ".dockerconfigjson" = sensitive(jsonencode({ + auths = { + "${each.value.url}" = { + "username" = each.value.secrets_store != "ssm" ? each.value.username : data.aws_ssm_parameter.infrastructure_credentials_registry_username[each.value.username].value + "password" = each.value.secrets_store != "ssm" ? each.value.password : data.aws_ssm_parameter.infrastructure_credentials_registry_password[each.value.password].value + "email" = each.value.email + "auth" = base64encode("${each.value.secrets_store != "ssm" ? each.value.username : data.aws_ssm_parameter.infrastructure_credentials_registry_username[each.value.username].value}:${each.value.secrets_store != "ssm" ? 
each.value.password : data.aws_ssm_parameter.infrastructure_credentials_registry_password[each.value.password].value}") } } })) diff --git a/variables.tf b/variables.tf index f81baf7..e195624 100644 --- a/variables.tf +++ b/variables.tf @@ -353,12 +353,11 @@ variable "helm_configurations" { }))) registry_secrets = optional(list(object({ name = string - url = string username = string password = string - secrets_store = string - auth = string + url = string email = string + secrets_store = string }))) generate_plugin_repository_secret = optional(bool) })) From 976054551f97ccf0153580f91f6bb7e5f8cfd48e Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Mon, 12 Sep 2022 21:03:47 +0800 Subject: [PATCH 08/11] Enhancing prometheus/grafana/kube-state-metrics comnfiguration for default helm --- .terraform.lock.hcl | 179 +++++ README.md | 6 +- kubernetes-helm.tf | 11 +- provisioning/kubernetes/grafana/README.md | 84 --- provisioning/kubernetes/grafana/grafana.tf | 101 --- provisioning/kubernetes/monitoring/README.md | 59 ++ .../monitoring/prometheus-grafana.tf | 614 ++++++++++++++++++ .../{grafana => monitoring}/variables.tf | 4 + provisioning/kubernetes/repositories.yaml | 8 + variables.tf | 15 +- 10 files changed, 882 insertions(+), 199 deletions(-) create mode 100644 .terraform.lock.hcl delete mode 100644 provisioning/kubernetes/grafana/README.md delete mode 100644 provisioning/kubernetes/grafana/grafana.tf create mode 100644 provisioning/kubernetes/monitoring/README.md create mode 100644 provisioning/kubernetes/monitoring/prometheus-grafana.tf rename provisioning/kubernetes/{grafana => monitoring}/variables.tf (82%) diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl new file mode 100644 index 0000000..6e3ad4f --- /dev/null +++ b/.terraform.lock.hcl @@ -0,0 +1,179 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/gavinbunney/kubectl" { + version = "1.14.0" + constraints = "~> 1.14.0" + hashes = [ + "h1:ItrWfCZMzM2JmvDncihBMalNLutsAk7kyyxVRaipftY=", + "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858", + "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030", + "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5", + "zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4", + "zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5", + "zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5", + "zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf", + "zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93", + "zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65", + ] +} + +provider "registry.terraform.io/gitlabhq/gitlab" { + version = "3.18.0" + constraints = "~> 3.4" + hashes = [ + "h1:JZsPjdsOqjG6l+s96d7Awp4XZ9Fwvgv7S1kCLZExuHA=", + "zh:1c3e89cf19118fc07d7b04257251fc9897e722c16e0a0df7b07fcd261f8c12e7", + "zh:20cbbf732def9534f03270064e79e1f955bf93c13893c6cf9c9d369bb49041ef", + "zh:34e91542ab5ec7a1df0c3f4dba0c898ae7f55a61369300d20f536fb5c051a1e2", + "zh:41ad3e89ecc54e6aa1a1cc71e1d9f17eac438b0d26757f55ae196a665ba78fea", + "zh:4aa8974e99f4bd81b2ca15cc5bf73afc33e246219f4f2306a1380e619b7e3a76", + "zh:60715337e79d9cdd442f95ca51e70405fb779cf02091f6771fa285163f270538", + "zh:6e33fef76103c0ea255ca87db08072af1566354de63da826b73df74f6710a00f", + "zh:78600f026ee35710aea301a45305e96aff97f1b122ef491c3214897f82d8f4a1", + "zh:79ef39ca66539cd999b61f7bfa3a68377f7b299f23bdcbdb991d59d74df2da92", + "zh:7d66fc368e8c5a61ef5044326f74c493cf743e240bed147bc1310b2aa0836aea", + "zh:9055179712413c45ee696a7854aebebc3161442dfac58afe9548e672358315d6", + "zh:e02e3679db81f668ef80cb53225ad6cb7f6c5ee9b85a202a5ea58361fa20eb3d", + "zh:e96f1d556048d4cb80cfe0dbf5150986ff0e1d0c8a7594be5324b5aafd381ef4", + 
"zh:ef9c7272f4e1f0a195dc13e8f05aa1afe0dee3fc80d73c51bac26746875993ec", + "zh:fd226f57916b5ab51275ed6ddff74bce99398d327e1cc7fde4adb9315a9dc50f", + ] +} + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.30.0" + constraints = ">= 2.23.0, >= 2.50.0, >= 3.0.0, >= 3.28.0, >= 3.63.0, >= 3.72.0, ~> 4.5, >= 4.9.0" + hashes = [ + "h1:/TOHrFrfQaj16peTH3D7JmEgqAVyO6EpHNxaq1qxIoE=", + "zh:08213f3ba960621448754211f148730edb59194919ee476b0231b769a5355028", + "zh:29c90d6f8bdae0e1469417ade28fa79c74c2af49593c1e2f24f07bacbca9e2c9", + "zh:5c6e9fab64ad68de6cd4ec6cbb20b0f75ba1e51a8efaeda3fe65419f096a06cb", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:9bf42718580e8c5097227df34e1bfa0a10a23eac9f527d97c2819c163087b402", + "zh:9f87e42e0f3d145fb0ad4aaff7ddded5720a64f9303956b33bd274c6dd05c05b", + "zh:bf0519ed9615bc408b72a0aebe1cc075d4c2042325590ba13dd264cd264907ea", + "zh:c3ac9e1cbd0935614f5a3c9cdb4cf9c6a1045937fe38e61da7c5c0fb7a069870", + "zh:d0c184476ada38c50acc068214ed1252b4fcf80b6be900fc1aed32cbb49f8ff6", + "zh:d4987dc7b7a69ea58f2b3ff0ea4ffc1b61a97881dbb8583c9fcf9444b753a6c2", + "zh:e8037376c81aeb98d8286dc19fba7f8eb053444d4b9484ea6a922382cffc1a85", + "zh:ecdabb44b48addc8483bca7bd683614a347367ae950ca8b6a6880679f5c12abd", + ] +} + +provider "registry.terraform.io/hashicorp/cloudinit" { + version = "2.2.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:Id6dDkpuSSLbGPTdbw49bVS/7XXHu/+d7CJoGDqtk5g=", + "zh:76825122171f9ea2287fd27e23e80a7eb482f6491a4f41a096d77b666896ee96", + "zh:795a36dee548e30ca9c9d474af9ad6d29290e0a9816154ad38d55381cd0ab12d", + "zh:9200f02cb917fb99e44b40a68936fd60d338e4d30a718b7e2e48024a795a61b9", + "zh:a33cf255dc670c20678063aa84218e2c1b7a67d557f480d8ec0f68bc428ed472", + "zh:ba3c1b2cd0879286c1f531862c027ec04783ece81de67c9a3b97076f1ce7f58f", + "zh:bd575456394428a1a02191d2e46af0c00e41fd4f28cfe117d57b6aeb5154a0fb", + "zh:c68dd1db83d8437c36c92dc3fc11d71ced9def3483dd28c45f8640cfcd59de9a", + 
"zh:cbfe34a90852ed03cc074601527bb580a648127255c08589bc3ef4bf4f2e7e0c", + "zh:d6ffd7398c6d1f359b96f5b757e77b99b339fbb91df1b96ac974fe71bc87695c", + "zh:d9c15285f847d7a52df59e044184fb3ba1b7679fd0386291ed183782683d9517", + "zh:f7dd02f6d36844da23c9a27bb084503812c29c1aec4aba97237fec16860fdc8c", + ] +} + +provider "registry.terraform.io/hashicorp/helm" { + version = "2.6.0" + constraints = "~> 2.0" + hashes = [ + "h1:i+fbwv8Vk8n5kQc+spEtzvCNF4yo2exzSAZhL0ipFuo=", + "zh:0ac248c28acc1a4fd11bd26a85e48ab78dd6abf0f7ac842bf1cd7edd05ac6cf8", + "zh:3d32c8deae3740d8c5310136cc11c8afeffc350fbf88afaca0c34a223a5246f5", + "zh:4055a27489733d19ca7fa2dfce14d323fe99ae9dede7d0fea21ee6db0b9ca74b", + "zh:58a8ed39653fd4c874a2ecb128eccfa24c94266a00e349fd7fb13e22ad81f381", + "zh:6c81508044913f25083de132d0ff81d083732aba07c506cc2db05aa0cefcde2c", + "zh:7db5d18093047bfc4fe597f79610c0a281b21db0d61b0bacb3800585e976f814", + "zh:8269207b7422db99e7be80a5352d111966c3dfc7eb98511f11c8ff7b2e813456", + "zh:b1d7ababfb2374e72532308ff442cc906b79256b66b3fe7a98d42c68c4ddf9c5", + "zh:ca63e226cbdc964a5d63ef21189f059ce45c3fa4a5e972204d6916a9177d2b44", + "zh:d205a72d60e8cc362943d66f5bcdd6b6aaaa9aab2b89fd83bf6f1978ac0b1e4c", + "zh:db47dc579a0e68e5bfe3a61f2e950e6e2af82b1f388d1069de014a937962b56a", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.11.0" + constraints = ">= 2.10.0, ~> 2.11.0" + hashes = [ + "h1:lSh/Q5vX73hHL80TtGn2Vrv1UYLzlIRjC+xaCijY4ew=", + "zh:143a19dd0ea3b07fc5e3d9231f3c2d01f92894385c98a67327de74c76c715843", + "zh:1fc757d209e09c3cf7848e4274daa32408c07743698fbed10ee52a4a479b62b6", + "zh:22dfebd0685749c51a8f765d51a1090a259778960ac1cd4f32021a325b2b9b72", + "zh:3039b3b76e870cd8fc404cf75a29c66b171c6ba9b6182e131b6ae2ca648ec7c0", + "zh:3af0a15562fcab4b5684b18802e0239371b2b8ff9197ed069ff4827f795a002b", + "zh:50aaf20336d1296a73315adb66f7687f75bd5c6b1f93a894b95c75cc142810ec", + 
"zh:682064fabff895ec351860b4fe0321290bbbb17c2a410b62c9bea0039400650e", + "zh:70ac914d5830b3371a2679d8f77cc20c419a6e12925145afae6c977c8eb90934", + "zh:710aa02cccf7b0f3fb50880d6d2a7a8b8c9435248666616844ba71f74648cddc", + "zh:88e418118cd5afbdec4984944c7ab36950bf48e8d3e09e090232e55eecfb470b", + "zh:9cef159377bf23fa331f8724fdc6ce27ad39a217a4bae6df3b1ca408fc643da6", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.2.3" + hashes = [ + "h1:FvRIEgCmAezgZUqb2F+PZ9WnSSnR5zbEM2ZI+GLmbMk=", + "zh:04f0978bb3e052707b8e82e46780c371ac1c66b689b4a23bbc2f58865ab7d5c0", + "zh:6484f1b3e9e3771eb7cc8e8bab8b35f939a55d550b3f4fb2ab141a24269ee6aa", + "zh:78a56d59a013cb0f7eb1c92815d6eb5cf07f8b5f0ae20b96d049e73db915b238", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8aa9950f4c4db37239bcb62e19910c49e47043f6c8587e5b0396619923657797", + "zh:996beea85f9084a725ff0e6473a4594deb5266727c5f56e9c1c7c62ded6addbb", + "zh:9a7ef7a21f48fabfd145b2e2a4240ca57517ad155017e86a30860d7c0c109de3", + "zh:a63e70ac052aa25120113bcddd50c1f3cfe61f681a93a50cea5595a4b2cc3e1c", + "zh:a6e8d46f94108e049ad85dbed60354236dc0b9b5ec8eabe01c4580280a43d3b8", + "zh:bb112ce7efbfcfa0e65ed97fa245ef348e0fd5bfa5a7e4ab2091a9bd469f0a9e", + "zh:d7bec0da5c094c6955efed100f3fe22fca8866859f87c025be1760feb174d6d9", + "zh:fb9f271b72094d07cef8154cd3d50e9aa818a0ea39130bc193132ad7b23076fd", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.4.3" + hashes = [ + "h1:saZR+mhthL0OZl4SyHXZraxyaBNVMxiZzks78nWcZ2o=", + "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752", + "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b", + "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3", + 
"zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5", + "zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda", + "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6", + "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1", + "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d", + "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8", + "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "4.0.2" + constraints = ">= 3.0.0" + hashes = [ + "h1:OXcWU4ib4ZVejsBy//wVoTiw23RHTqauFbEzN5oLRrU=", + "zh:080ac1021049927025e00bf42137658a24660e0e88f150041cc2a9a2a023006f", + "zh:122def74983c5f31e76903bb71fa1991bd187fa52f48efaa7216bc70806370a8", + "zh:42c1fa0a0b3fb49b8e7d019d59af72617b250c0d5b9c046ef97c613139ecb3a3", + "zh:4b903068f01627fdf96a5c5dd4183f4287d75cfe6c4737ae85a76c0df7bcf1d4", + "zh:55aed8d7e17e6fc44af9168905bd397600d5dbada202c93ec0a6e1fcb3dead7e", + "zh:6422ef06e6bb20689c35a6e8ddbbac135267251b7729c65d8dcfb1f8f0206b3b", + "zh:99427862b9fa30c1c73f99b4e43ba4991fe216950cb71ed012e204cd9318b1ae", + "zh:acf507f08e8ecacb5cb27d96ad089cfcdd272e0d8d3635a60539f341e3bd3638", + "zh:cf474324f10700faf959e568de9d6db4e60f9d3f886f7dc965d09459bdf17be9", + "zh:d474b6cee058b3619bdfca38f9bd596a7cc33556f57b26754a6a58269c1811bb", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f718146ad9ff7a44d446e3ccd771ea2b3b14aec233e1f13388d0d67581ef1cfb", + ] +} diff --git a/README.md b/README.md index d06256a..70b4917 100644 --- a/README.md +++ b/README.md @@ -123,9 +123,9 @@ MIT Licensed. 
See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | ~> 18.23.0 | | [elastic-stack](#module\_elastic-stack) | ./provisioning/kubernetes/elastic-stack | n/a | | [gitlab-k8s-agent](#module\_gitlab-k8s-agent) | ./provisioning/kubernetes/gitlab-kubernetes-agent | n/a | -| [grafana](#module\_grafana) | ./provisioning/kubernetes/grafana | n/a | | [kubernetes-dashboard](#module\_kubernetes-dashboard) | ./provisioning/kubernetes/kubernetes-dashboard | n/a | | [metrics-server](#module\_metrics-server) | ./provisioning/kubernetes/metrics-server | n/a | +| [monitoring-stack](#module\_monitoring-stack) | ./provisioning/kubernetes/monitoring | n/a | | [nginx-controller-ingress](#module\_nginx-controller-ingress) | ./provisioning/kubernetes/nginx-controller | n/a | | [stakater-reloader](#module\_stakater-reloader) | ./provisioning/kubernetes/stakater-reloader | n/a | | [subnet\_addrs](#module\_subnet\_addrs) | hashicorp/subnets/cidr | 1.0.0 | @@ -184,8 +184,8 @@ MIT Licensed. See [LICENSE](https://gitlab.com/magnetic-asia/infrastructure-as-c | [google\_authDomain](#input\_google\_authDomain) | Used for Infrastructure OAuth: Google Auth Domain | `any` | n/a | yes | | [google\_clientID](#input\_google\_clientID) | Used for Infrastructure OAuth: Google Auth Client ID | `any` | n/a | yes | | [google\_clientSecret](#input\_google\_clientSecret) | Used for Infrastructure OAuth: Google Auth Client Secret | `any` | n/a | yes | -| [helm\_configurations](#input\_helm\_configurations) | n/a |
object({
dashboard = optional(string)
gitlab_runner = optional(string)
vault_consul = optional(object({
consul_values = optional(string)
vault_values = optional(string)
enable_aws_vault_unseal = optional(bool) # If Vault is enabled and deployed, by default, the unseal process is manual; Changing this to true allows for automatic unseal using AWS KMS"
vault_nodeselector = optional(string) # Allow for vault node selectors without extensive reconfiguration of the standard values file
vault_tolerations = optional(string) # Allow for tolerating certain taint on nodes, example usage, string:'NoExecute:we_love_hashicorp:true'
}))
ingress = optional(object({
nginx_values = optional(string)
certmanager_values = optional(string)
}))
elasticstack = optional(string)
grafana = optional(string)
argocd = optional(object({
value_file = optional(string)
application_set = optional(list(string))
repository_secrets = optional(list(object({
name = string
url = string
type = string
username = string
password = string
secrets_store = string
})))
credential_templates = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
})))
registry_secrets = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
auth = string
email = string
})))
generate_plugin_repository_secret = optional(bool)
}))
})
|
{
"argocd": null,
"dashboard": null,
"elasticstack": null,
"gitlab_runner": null,
"grafana": null,
"ingress": null,
"vault_consul": null
}
| no | -| [helm\_installations](#input\_helm\_installations) | n/a |
object({
dashboard = bool
gitlab_runner = bool
gitlab_k8s_agent = bool
vault_consul = bool
ingress = bool
elasticstack = bool
grafana = bool
argocd = bool
stakater_reloader = bool
metrics_server = bool
})
|
{
"argocd": false,
"dashboard": true,
"elasticstack": false,
"gitlab_k8s_agent": false,
"gitlab_runner": false,
"grafana": true,
"ingress": true,
"metrics_server": true,
"stakater_reloader": false,
"vault_consul": true
}
| no | +| [helm\_configurations](#input\_helm\_configurations) | n/a |
object({
dashboard = optional(string)
gitlab_runner = optional(string)
vault_consul = optional(object({
consul_values = optional(string)
vault_values = optional(string)
enable_aws_vault_unseal = optional(bool) # If Vault is enabled and deployed, by default, the unseal process is manual; Changing this to true allows for automatic unseal using AWS KMS"
vault_nodeselector = optional(string) # Allow for vault node selectors without extensive reconfiguration of the standard values file
vault_tolerations = optional(string) # Allow for tolerating certain taint on nodes, example usage, string:'NoExecute:we_love_hashicorp:true'
}))
ingress = optional(object({
nginx_values = optional(string)
certmanager_values = optional(string)
}))
elasticstack = optional(string)
monitoring = optional(object({
values = optional(string)
version = optional(string)
}))
argocd = optional(object({
value_file = optional(string)
application_set = optional(list(string))
repository_secrets = optional(list(object({
name = string
url = string
type = string
username = string
password = string
secrets_store = string
})))
credential_templates = optional(list(object({
name = string
url = string
username = string
password = string
secrets_store = string
})))
registry_secrets = optional(list(object({
name = string
username = string
password = string
url = string
email = string
secrets_store = string
})))
generate_plugin_repository_secret = optional(bool)
}))
})
|
{
"argocd": null,
"dashboard": null,
"elasticstack": null,
"gitlab_runner": null,
"ingress": null,
"monitoring": null,
"vault_consul": null
}
| no | +| [helm\_installations](#input\_helm\_installations) | n/a |
object({
dashboard = bool
gitlab_runner = bool
gitlab_k8s_agent = bool
vault_consul = bool
ingress = bool
elasticstack = bool
monitoring = bool
argocd = bool
stakater_reloader = bool
metrics_server = bool
})
|
{
"argocd": false,
"dashboard": false,
"elasticstack": false,
"gitlab_k8s_agent": false,
"gitlab_runner": false,
"ingress": true,
"metrics_server": true,
"monitoring": true,
"stakater_reloader": false,
"vault_consul": false
}
| no | | [instance\_desired\_size](#input\_instance\_desired\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `2` | no | | [instance\_max\_size](#input\_instance\_max\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `4` | no | | [instance\_min\_size](#input\_instance\_min\_size) | Count of instances to be spun up within the context of a kubernetes cluster. Minimum: 2 | `number` | `1` | no | diff --git a/kubernetes-helm.tf b/kubernetes-helm.tf index 8b66184..768fad8 100644 --- a/kubernetes-helm.tf +++ b/kubernetes-helm.tf @@ -101,7 +101,7 @@ module "stakater-reloader" { module "metrics-server" { source = "./provisioning/kubernetes/metrics-server" depends_on = [module.eks] - count = var.helm_installations.metrics_server ? 1 : 0 + count = var.helm_installations.metrics_server && !var.helm_installations.monitoring ? 1 : 0 app_namespace = var.app_namespace tfenv = var.tfenv @@ -135,10 +135,10 @@ module "elastic-stack" { tags = local.base_tags } -module "grafana" { - source = "./provisioning/kubernetes/grafana" +module "monitoring-stack" { + source = "./provisioning/kubernetes/monitoring" depends_on = [module.eks] - count = var.helm_installations.grafana ? 1 : 0 + count = var.helm_installations.monitoring ? 
1 : 0 app_namespace = var.app_namespace tfenv = var.tfenv @@ -147,7 +147,8 @@ module "grafana" { google_clientSecret = var.google_clientSecret google_authDomain = var.google_authDomain - custom_manifest = var.helm_configurations.grafana + custom_manifest = var.helm_configurations.monitoring.values + custom_version = var.helm_configurations.monitoring.version } module "argocd" { diff --git a/provisioning/kubernetes/grafana/README.md b/provisioning/kubernetes/grafana/README.md deleted file mode 100644 index 83d38a3..0000000 --- a/provisioning/kubernetes/grafana/README.md +++ /dev/null @@ -1,84 +0,0 @@ -#Grafana - -We use grafana for the benefit of graphing the vast metrics of Prometheus. In this case we will use the built in Prometheus server from Gitlab that collects Kubernetes metrics. - -#Installing Grafana - -We use helm charts to install grafana. We have a custom helm chart values located at `provisioning/kubernetes/grafana/src/values.7.4.2.yaml` - -You need to change some lines to match the environment/cluster where you want grafana installed. In this case you can define the domain you want. - -For ingress: - -``` -ingress: - enabled: true - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: #{} - kubernetes.io/ingress.class: nginx - kubernetes.io/tls-acme: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod - labels: #{} - path: / - hosts: - #- chart-example.local - - .mydomain.com - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - tls: #[] - - secretName: chart-ets-shared-uat1-tls - hosts: - - .mydomain.com -``` - -For Datasources: - -``` -datasources: - datasources.yaml: - apiVersion: 1 - datasources: - - name: Prometheus - type: prometheus - url: http://prometheus-prometheus-server..svc.cluster.local - access: proxy -``` - -For oauth configuration: - -``` - server: - root_url: https://grafana..tech.mydomain.com -# This is for gitlab authentication -# auth.gitlab: -# enabled: true -# allow_sign_up: false -# client_id: -# client_secret: -# scope: read_api -# auth_url: https://gitlab.com/oauth/authorize -# token_url: https://gitlab.com/oauth/token -# api_url: https://gitlab.com/api/v4 -# allowed_groups: whitelabels - auth.google: - enabled: true - allow_sign_up: true - client_id: - client_secret: - scopes: https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email - auth_url: https://accounts.google.com/o/oauth2/auth - token_url: https://accounts.google.com/o/oauth2/token - api_url: https://www.googleapis.com/oauth2/v1/userinfo - allowed_domains: mydomain.com #email address to whitelist -``` - -Then install grafana using helm by: - -`helm install grafana grafana/grafana -f provisioning/kubernetes/grafana/src/values.v7.4.2.yaml -n monitoring` diff --git a/provisioning/kubernetes/grafana/grafana.tf b/provisioning/kubernetes/grafana/grafana.tf deleted file mode 100644 index fb1f69d..0000000 --- a/provisioning/kubernetes/grafana/grafana.tf +++ /dev/null @@ -1,101 +0,0 @@ -resource "helm_release" "grafana" { - name = "grafana-${var.app_namespace}-${var.tfenv}" - repository = "https://grafana.github.io/helm-charts" - chart = "grafana" - namespace = "monitoring" - - values = var.custom_manifest != null ? 
[var.custom_manifest] : [<.mydomain.com + tls: + - secretName: chart-shared-tls + hosts: + - .mydomain.com +``` + +For Datasources: + +``` +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + url: http://prometheus-prometheus-server..svc.cluster.local + access: proxy +``` + +For oauth configuration: + +``` + auth.google: + enabled: true + allow_sign_up: true + client_id: + client_secret: + scopes: https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email + auth_url: https://accounts.google.com/o/oauth2/auth + token_url: https://accounts.google.com/o/oauth2/token + api_url: https://www.googleapis.com/oauth2/v1/userinfo + allowed_domains: mydomain.com #email address to whitelist +``` diff --git a/provisioning/kubernetes/monitoring/prometheus-grafana.tf b/provisioning/kubernetes/monitoring/prometheus-grafana.tf new file mode 100644 index 0000000..12f242c --- /dev/null +++ b/provisioning/kubernetes/monitoring/prometheus-grafana.tf @@ -0,0 +1,614 @@ +resource "helm_release" "prometheus" { + name = "${var.app_namespace}-${var.tfenv}" + repository = "https://prometheus-community.github.io/helm-charts" + chart = "kube-prometheus-stack" + namespace = "monitoring" + version = coalesce(var.custom_version, "39.12.1") + + values = var.custom_manifest != null ? 
[var.custom_manifest] : [< Date: Thu, 29 Sep 2022 09:34:03 +0800 Subject: [PATCH 09/11] Fix: disk size to use var --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index 08414f9..c077731 100644 --- a/locals.tf +++ b/locals.tf @@ -31,7 +31,7 @@ locals { key_name = var.node_key_name public_ip = var.node_public_ip create_launch_template = var.create_launch_template - disk_size = "50" + disk_size = var.root_vol_size k8s_labels = { Environment = var.tfenv } From c1e10a434d5cd18abd41c811456a429ee076bc2f Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Sun, 9 Oct 2022 21:08:50 +0800 Subject: [PATCH 10/11] fix: custom manifests and versions for monitoring --- kubernetes-helm.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes-helm.tf b/kubernetes-helm.tf index 768fad8..837f63c 100644 --- a/kubernetes-helm.tf +++ b/kubernetes-helm.tf @@ -147,8 +147,8 @@ module "monitoring-stack" { google_clientSecret = var.google_clientSecret google_authDomain = var.google_authDomain - custom_manifest = var.helm_configurations.monitoring.values - custom_version = var.helm_configurations.monitoring.version + custom_manifest = try(var.helm_configurations.monitoring.values, null) + custom_version = try(var.helm_configurations.monitoring.version, null) } module "argocd" { From 25aa58aab7b6fa3caa112028d27795d85c91a0e5 Mon Sep 17 00:00:00 2001 From: Aaron Baideme Date: Mon, 17 Oct 2022 23:47:43 +0800 Subject: [PATCH 11/11] Unnecessary item --- provisioning/kubernetes/monitoring/prometheus-grafana.tf | 1 - 1 file changed, 1 deletion(-) diff --git a/provisioning/kubernetes/monitoring/prometheus-grafana.tf b/provisioning/kubernetes/monitoring/prometheus-grafana.tf index 12f242c..c2c5f27 100644 --- a/provisioning/kubernetes/monitoring/prometheus-grafana.tf +++ b/provisioning/kubernetes/monitoring/prometheus-grafana.tf @@ -139,7 +139,6 @@ grafana: enabled: "true" ingressClassName: nginx annotations: - 
kubernetes.io/ingress.class: nginx kubernetes.io/tls-acme: "true" cert-manager.io/cluster-issuer: letsencrypt-prod labels: