Skip to content

Commit

Permalink
azurerm-linux-vm: use azurerm_virtual_machine
Browse files Browse the repository at this point in the history
We have to fall back to using this resource until
hashicorp/terraform-provider-azurerm#6117
is fixed.

With `azurerm_linux_virtual_machine` and
`azurerm_virtual_machine_data_disk_attachment` the disk only gets
attached once the VM is booted up, and the VM can't boot up if it waits
for the data disk to appear.
  • Loading branch information
flokli committed Dec 13, 2023
1 parent d25787d commit 33bc5b2
Show file tree
Hide file tree
Showing 8 changed files with 109 additions and 51 deletions.
4 changes: 0 additions & 4 deletions hosts/binary-cache/configuration.nix
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,6 @@
device = "/dev/disk/by-lun/10";
fsType = "ext4";
options = [
# Due to https://github.com/hashicorp/terraform-provider-azurerm/issues/6117
# disks get attached later during boot.
# The default of 90s doesn't seem to be sufficient.
"x-systemd.device-timeout=5min"
"x-systemd.makefs"
"x-systemd.growfs"
];
Expand Down
4 changes: 0 additions & 4 deletions hosts/jenkins-controller/configuration.nix
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,6 @@
device = "/dev/disk/by-lun/10";
fsType = "ext4";
options = [
# Due to https://github.com/hashicorp/terraform-provider-azurerm/issues/6117
# disks get attached later during boot.
# The default of 90s doesn't seem to be sufficient.
"x-systemd.device-timeout=5min"
"x-systemd.makefs"
"x-systemd.growfs"
];
Expand Down
1 change: 1 addition & 0 deletions nix/devshell.nix
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
p.azurerm
p.external
p.null
p.random
p.sops
]))
];
Expand Down
19 changes: 11 additions & 8 deletions terraform/jenkins/binary_cache.tf
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,17 @@ module "binary_cache_vm" {
})])

subnet_id = azurerm_subnet.binary_cache.id

# Attach the Caddy state disk to the VM at creation time.
# The module's storage_data_disk block always uses create_option = "Attach"
# and only consumes name, managed_disk_id, lun, caching, disk_size_gb and
# managed_disk_type — no virtual_machine_id is needed (or read) here.
data_disks = [{
  name            = azurerm_managed_disk.binary_cache_caddy_state.name
  managed_disk_id = azurerm_managed_disk.binary_cache_caddy_state.id
  lun             = "10"
  caching         = "None"
  # disk_size_gb has to be passed even when attaching an existing disk
  disk_size_gb = azurerm_managed_disk.binary_cache_caddy_state.disk_size_gb
}]
}

resource "azurerm_subnet" "binary_cache" {
Expand Down Expand Up @@ -98,11 +109,3 @@ resource "azurerm_managed_disk" "binary_cache_caddy_state" {
create_option = "Empty"
disk_size_gb = 1
}

# Attach to the VM
resource "azurerm_virtual_machine_data_disk_attachment" "binary_cache_vm_caddy_state" {
managed_disk_id = azurerm_managed_disk.binary_cache_caddy_state.id
virtual_machine_id = module.binary_cache_vm.virtual_machine_id
lun = "10"
caching = "None"
}
18 changes: 10 additions & 8 deletions terraform/jenkins/jenkins_controller.tf
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,16 @@ module "jenkins_controller_vm" {
})])

subnet_id = azurerm_subnet.jenkins.id

# Attach the Jenkins state disk to the VM at creation time.
# The module's storage_data_disk block always uses create_option = "Attach",
# so it is not passed here.
data_disks = [{
  name            = azurerm_managed_disk.jenkins_controller_jenkins_state.name
  managed_disk_id = azurerm_managed_disk.jenkins_controller_jenkins_state.id
  lun             = "10"
  caching         = "None"
  # disk_size_gb has to be passed even when attaching an existing disk
  disk_size_gb = azurerm_managed_disk.jenkins_controller_jenkins_state.disk_size_gb
}]
}

resource "azurerm_network_interface_security_group_association" "jenkins_controller_vm" {
Expand Down Expand Up @@ -74,11 +84,3 @@ resource "azurerm_managed_disk" "jenkins_controller_jenkins_state" {
create_option = "Empty"
disk_size_gb = 10
}

# Attach to the VM
resource "azurerm_virtual_machine_data_disk_attachment" "jenkins_controller_vm_jenkins_state" {
managed_disk_id = azurerm_managed_disk.jenkins_controller_jenkins_state.id
virtual_machine_id = module.jenkins_controller_vm.virtual_machine_id
lun = "10"
caching = "None"
}
10 changes: 10 additions & 0 deletions tf-modules/azurerm-linux-vm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,13 @@ SPDX-License-Identifier: Apache-2.0
# azurerm-linux-vm

Terraform module spinning up an Azure VM.

This uses the `azurerm_virtual_machine` resource to spin up the VM, as it allows
data disks to be attached on boot.

This works around
https://github.com/hashicorp/terraform-provider-azurerm/issues/6117:
with `azurerm_linux_virtual_machine` and
`azurerm_virtual_machine_data_disk_attachment`, the data disk only gets
attached once the VM has booted — but the VM can't finish booting while it
is still waiting for that data disk to appear.
3 changes: 3 additions & 0 deletions tf-modules/azurerm-linux-vm/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,6 @@ variable "subnet_id" {
description = "The subnet ID to attach to the VM and allocate an IP from"
}

variable "data_disks" {
  # list(any) rather than list(map(string)) because disk_size_gb is numeric
  # while the other keys are strings.
  type        = list(any)
  default     = []
  description = "List of maps with keys of the storage_data_disk block (name, managed_disk_id, lun, caching, disk_size_gb, managed_disk_type). create_option is always set to Attach by the module."
}
101 changes: 74 additions & 27 deletions tf-modules/azurerm-linux-vm/virtual_machine.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,46 +2,77 @@
#
# SPDX-License-Identifier: Apache-2.0

resource "azurerm_linux_virtual_machine" "main" {
resource "azurerm_virtual_machine" "main" {
name = var.virtual_machine_name
resource_group_name = var.resource_group_name
location = var.location
size = var.virtual_machine_size
vm_size = var.virtual_machine_size

# Unused, but required by the API. May not be root either
admin_username = "foo"
admin_password = "S00persecret"

# We *don't* support password auth, and this doesn't change anything.
# However, if we don't set this to false we need to
# specify additional pubkeys.
disable_password_authentication = false
# We can't use admin_ssh_key, as it only works for the admin_username.
delete_os_disk_on_termination = true
delete_data_disks_on_termination = false

network_interface_ids = [azurerm_network_interface.default.id]
source_image_id = var.virtual_machine_source_image

storage_image_reference {
id = var.virtual_machine_source_image
}

identity {
type = "SystemAssigned"
}

# We only set custom_data here, not user_data.
# user_data is more recent, and allows updates without recreating the machine,
# but at least cloud-init 23.1.2 blocks boot if custom_data is not set.
# (It logs about not being able to mount /dev/sr0 to /metadata).
# This can be worked around by setting custom_data to a static placeholder,
# but user_data is still ignored.
# TODO: check this again with a more recent cloud-init version.
custom_data = (var.virtual_machine_custom_data == "") ? null : base64encode(var.virtual_machine_custom_data)
os_profile {
computer_name = var.virtual_machine_name
# Unused, but required by the API. May not be root either
admin_username = "foo"
admin_password = "S00persecret"

# We only set custom_data here, not user_data.
# user_data is more recent, and allows updates without recreating the machine,
# but at least cloud-init 23.1.2 blocks boot if custom_data is not set.
# (It logs about not being able to mount /dev/sr0 to /metadata).
# This can be worked around by setting custom_data to a static placeholder,
# but user_data is still ignored.
# TODO: check this again with a more recent cloud-init version.
custom_data = (var.virtual_machine_custom_data == "") ? null : base64encode(var.virtual_machine_custom_data)
}

os_profile_linux_config {
# We *don't* support password auth, and this doesn't change anything.
# However, if we don't set this to false we need to
# specify additional pubkeys.
disable_password_authentication = false
# We can't use admin_ssh_key, as it only works for the admin_username.
}

# Enable boot diagnostics, use the managed storage account to store them
boot_diagnostics {
storage_account_uri = null
enabled = true
# azurerm_virtual_machine doesn't support the managed storage account
storage_uri = azurerm_storage_account.boot_diag.primary_blob_endpoint
}

storage_os_disk {
name = "${var.virtual_machine_name}-osdisk" # needs to be unique
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}

os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
# One storage_data_disk block per entry in var.data_disks. Declaring the
# disks on the azurerm_virtual_machine resource itself makes Azure attach
# them at VM creation, before first boot — the workaround for
# hashicorp/terraform-provider-azurerm#6117 this module exists for.
dynamic "storage_data_disk" {
  for_each = var.data_disks

  content {
    # use lookup here, so keys can be set optionally
    name          = lookup(storage_data_disk.value, "name", null)
    caching       = lookup(storage_data_disk.value, "caching", null)
    # The disk itself is created (and destroyed) outside this module;
    # we only ever attach pre-existing managed disks.
    create_option = "Attach"
    # This has to be passed, even for "Attach"
    disk_size_gb = lookup(storage_data_disk.value, "disk_size_gb", null)
    lun          = lookup(storage_data_disk.value, "lun", null)

    managed_disk_type = lookup(storage_data_disk.value, "managed_disk_type", null)
    managed_disk_id   = lookup(storage_data_disk.value, "managed_disk_id", null)
  }
}
}

Expand All @@ -66,12 +97,28 @@ resource "azurerm_public_ip" "default" {
allocation_method = "Static"
}

# Random lowercase-alphanumeric suffix so the boot-diagnostics storage
# account name is globally unique (storage account names only allow
# lowercase letters and digits).
resource "random_string" "boot_diag" {
  # length is a number and special/upper are booleans; quoting them as
  # strings works only through implicit type coercion.
  length  = 8
  special = false
  upper   = false
}

# Storage account backing VM boot diagnostics, since azurerm_virtual_machine
# doesn't support the managed storage account.
resource "azurerm_storage_account" "boot_diag" {
  name                     = "${random_string.boot_diag.result}bootdiag"
  resource_group_name      = var.resource_group_name
  location                 = var.location
  account_tier             = "Standard"
  # NOTE(review): GRS seems like overkill for throwaway boot-diagnostics
  # logs; LRS would likely suffice — confirm before downgrading.
  account_replication_type = "GRS"
}


output "virtual_machine_id" {
value = azurerm_linux_virtual_machine.main.id
value = azurerm_virtual_machine.main.id
}

output "virtual_machine_identity_principal_id" {
value = azurerm_linux_virtual_machine.main.identity[0].principal_id
value = azurerm_virtual_machine.main.identity[0].principal_id
}

output "virtual_machine_network_interface_id" {
Expand Down

0 comments on commit 33bc5b2

Please sign in to comment.