Merge pull request #275 from ComputeCanada/volumes
Move volumes in instance specs
cmd-ntrf authored May 14, 2024
2 parents 2a2657d + 351d287 commit ad6f76e
Showing 7 changed files with 71 additions and 120 deletions.
22 changes: 10 additions & 12 deletions aws/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
@@ -189,17 +188,6 @@ resource "aws_volume_attachment" "attachments" {
}
locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [ for key, volume in module.design.volumes:
"/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, "-", "")}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? aws_eip.public_ip[x].public_ip : ""
@@ -212,6 +200,16 @@ locals {
gpus = try(one(data.aws_ec2_instance_type.instance_type[values.prefix].gpus).count, 0)
mig = lookup(values, "mig", null)
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => merge(
{ glob = "/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${x}-${pv_key}-${name}"].id, "-", "")}" },
specs,
)
} if contains(values.tags, pv_key)
} : {}
}
}

22 changes: 10 additions & 12 deletions azure/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
@@ -149,17 +148,6 @@ resource "azurerm_virtual_machine_data_disk_attachment" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[volume.instance], replace(key, "${volume.instance}-", ""))}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

resource_group_name = var.azure_resource_group == "" ? azurerm_resource_group.group[0].name : var.azure_resource_group

vmsizes = jsondecode(file("${path.module}/vmsizes.json"))
@@ -175,6 +163,16 @@ locals {
gpus = local.vmsizes[values.type].gpus
mig = lookup(values, "mig", null)
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => merge(
{ glob = "/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[x], replace(pv_key, "${x}-", ""))}" },
specs,
)
} if contains(values.tags, pv_key)
} : {}
}
}

2 changes: 0 additions & 2 deletions common/configuration/main.tf
@@ -10,7 +10,6 @@ variable "cloud_provider" { }
variable "cloud_region" { }
variable "domain_name" { }
variable "cluster_name" { }
variable "volume_devices" { }
variable "guest_passwd" { }

variable "generate_ssh_key" { }
@@ -73,7 +72,6 @@ locals {
terraform = {
instances = local.inventory
tag_ip = local.tag_ip
volumes = var.volume_devices
data = {
sudoer_username = var.sudoer_username
public_keys = local.ssh_authorized_keys
31 changes: 4 additions & 27 deletions docs/README.md
@@ -1816,33 +1816,10 @@ Puppet data hierarchy.
### 10.14 Expand a volume
Volumes defined in the `volumes` map can be expanded at will. After their creation, you can
increase their size in `main.tf`, then call `terraform apply`, and the associated block
device will be expanded.
To benefit from the new storage, the following commands need to be run as root
on the instance to which the expanded volume is attached.
1. Identify the physical volume path
```
pvscan
```
2. Expand the physical volume
```
pvresize /dev/vdxyz # replace vdxyz with the volume identified in step 1
```
3. Identify the volume group path
```
lvdisplay
```
4. Expand the logical volume using the volume group path identified in step 3
```
lvextend -l '+100%FREE' -r /dev/project_vg/project
```
5. Resize the XFS filesystem:
```
xfs_growfs /dev/project_vg/project
```
Volumes defined in the `volumes` map can be expanded at will. To enable online extension of
a volume, add `enable_resize = true` to its specs map. You can then increase its size whenever
needed: the corresponding volume will be expanded by the cloud provider and the filesystem will be
extended by Puppet.
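For illustration, enabling online resize for one volume of a hypothetical `volumes` map could look like the following sketch (the `nfs` tag and the volume names are examples, not requirements):
```hcl
volumes = {
  nfs = {
    home    = { size = 100 }
    project = { size = 500, enable_resize = true } # can grow without downtime
  }
}
```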
## 11. Customize Magic Castle Terraform Files
69 changes: 26 additions & 43 deletions docs/design.md
@@ -201,26 +201,18 @@ the `module.design.instances` map.
7. **Create the volumes**. In `infrastructure.tf`, define the `volumes` resource using
`module.design.volumes`.

8. **Consolidate the volume device information**. In `infrastructure.tf`, define a local
variable named `volume_devices` implementing the following logic in HCL. Replace
the line starting with `/dev/disk/by-id` with the proper logic to match the volume
resource to its device path from within the instance to which it is attached.
8. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpus, #gpus, ram, volumes). For the volumes, provide the paths under which each volume will be found on the instance to which it is attached; these paths are typically derived from the volume id. Here is an example:
```hcl
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/*${substr(provider_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/*${substr(provider.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
} if contains(values.tags, pv_key)
} : {}
```
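In this expression, `module.design.volume_per_instance` maps each instance name to the list of volumes attached to it, so the ternary guard leaves `volumes` as an empty map for instances that have no attached volume.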

9. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram).

10. **Create the instance configurations**. In `infrastructure.tf`, include the
9. **Create the instance configurations**. In `infrastructure.tf`, include the
`common/configuration` module like this:
```hcl
module "configuration" {
@@ -231,7 +223,6 @@ resource to its device path from within the instance to which it is attached.
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
cluster_name = var.cluster_name
guest_passwd = var.guest_passwd
@@ -241,15 +232,15 @@ resource to its device path from within the instance to which it is attached.
cloud_region = local.cloud_region
}
```
11. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
10. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
`module.design.instances_to_build` for the instance attributes and `module.configuration.user_data`
for the initial configuration.

12. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
11. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
`module.design.volumes` and refer to the attribute `each.value.instance` to retrieve the
instance's id to which the volume needs to be attached.
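As a sketch, with `provider_volume_attachment`, `provider_volume` and `provider_instance` standing in for your provider's actual resource names:
```hcl
resource "provider_volume_attachment" "attachments" {
  for_each    = module.design.volumes
  volume_id   = provider_volume.volumes[each.key].id
  instance_id = provider_instance.instances[each.value.instance].id
}
```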

13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
that contains the attributes of instances that are publicly accessible from the Internet, along with their ids.
```hcl
locals {
@@ -260,7 +251,7 @@
}
```

14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
`common/provision` module like this
```hcl
module "provision" {
@@ -360,21 +351,7 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
}
```
8. **Consolidate the volume devices' information**. Add the following snippet to `infrastructure.tf`:
```hcl
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${volume["instance"]}-${ki}-${kj}"].id, "d-", "")}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}
```
9. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
8. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
```hcl
locals {
inventory = { for x, values in module.design.instances :
@@ -387,13 +364,20 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
cpus = ...
gpus = ...
ram = ...
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => ["/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${x}-${pv_key}-${name}"].id, "d-", "")}"]
} if contains(values.tags, pv_key)
} : {}
}
}
}
}
```
10. **Create the instance configurations**. In `infrastructure.tf`, include the
9. **Create the instance configurations**. In `infrastructure.tf`, include the
`common/configuration` module like this:
```hcl
module "configuration" {
@@ -404,7 +388,6 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
cluster_name = var.cluster_name
guest_passwd = var.guest_passwd
@@ -415,21 +398,21 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
}
```
11. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
10. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
```hcl
resource "alicloud_instance" "instances" {
for_each = module.design.instances
}
```
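A possible starting point for the completion, assuming `module.configuration.user_data` is a map keyed by instance name and leaving out the image and networking arguments, which come from resources defined in the previous steps (check the Alibaba Cloud provider documentation for the full argument list):
```hcl
resource "alicloud_instance" "instances" {
  for_each      = module.design.instances
  instance_name = format("%s-%s", var.cluster_name, each.key)
  instance_type = each.value.type
  user_data     = module.configuration.user_data[each.key] # one cloud-init document per instance
  # image_id, vswitch_id and security_groups are omitted; they come from the
  # image variable and the network resources created earlier.
}
```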
12. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
11. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
```hcl
resource "alicloud_disk_attachment" "attachments" {
for_each = module.design.volumes
}
```
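A possible completion, following the generic guide's instruction to retrieve the instance id through `each.value.instance`; `disk_id` and `instance_id` are the attachment arguments used by the Alibaba Cloud provider, but verify them against its documentation:
```hcl
resource "alicloud_disk_attachment" "attachments" {
  for_each    = module.design.volumes
  disk_id     = alicloud_disk.volumes[each.key].id
  instance_id = alicloud_instance.instances[each.value.instance].id
}
```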
13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
that contains the attributes of instances that are publicly accessible from the Internet, along with their ids.
```hcl
locals {
@@ -440,7 +423,7 @@
}
```
14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
`common/provision` module like this
```hcl
module "provision" {
22 changes: 10 additions & 12 deletions gcp/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
@@ -162,17 +161,6 @@ resource "google_compute_attached_disk" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/google-${var.cluster_name}-${volume["instance"]}-${ki}-${kj}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? google_compute_address.public_ip[x].address : ""
@@ -185,6 +173,16 @@ locals {
gpus = try(data.external.machine_type[values["prefix"]].result["gpus"], lookup(values, "gpu_count", 0))
mig = lookup(values, "mig", null)
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => merge(
{ glob = "/dev/disk/by-id/google-${var.cluster_name}-${x}-${pv_key}-${name}"},
specs,
)
} if contains(values.tags, pv_key)
} : {}
}
}

23 changes: 11 additions & 12 deletions openstack/infrastructure.tf
@@ -16,7 +16,6 @@ module "configuration" {
sudoer_username = var.sudoer_username
generate_ssh_key = var.generate_ssh_key
public_keys = var.public_keys
volume_devices = local.volume_devices
domain_name = module.design.domain_name
bastion_tag = module.design.bastion_tag
cluster_name = var.cluster_name
@@ -107,6 +106,7 @@ resource "openstack_blockstorage_volume_v3" "volumes" {
size = each.value.size
volume_type = lookup(each.value, "type", null)
snapshot_id = lookup(each.value, "snapshot", null)
enable_online_resize = lookup(each.value, "enable_resize", false)
}

resource "openstack_compute_volume_attach_v2" "attachments" {
@@ -116,17 +116,6 @@ resource "openstack_compute_volume_attach_v2" "attachments" {
}

locals {
volume_devices = {
for ki, vi in var.volumes :
ki => {
for kj, vj in vi :
kj => [for key, volume in module.design.volumes :
"/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
if key == "${volume["instance"]}-${ki}-${kj}"
]
}
}

inventory = { for x, values in module.design.instances :
x => {
public_ip = contains(values.tags, "public") ? local.public_ip[x] : ""
@@ -142,6 +131,16 @@ locals {
])
mig = lookup(values, "mig", null)
}
volumes = contains(keys(module.design.volume_per_instance), x) ? {
for pv_key, pv_values in var.volumes:
pv_key => {
for name, specs in pv_values:
name => merge(
{ glob = "/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}" },
specs,
)
} if contains(values.tags, pv_key)
} : {}
}
}

