diff --git a/CHANGELOG.md b/CHANGELOG.md
index 90bfc7c22..0b301bda5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ ENHANCEMENTS:
 * data/opennebula_virtual_network: make `name` optional and enable `tags` filtering
 * resources/opennebula_group: add `sunstone` and `tags` sections
 * resources/opennebula_virtual_network: compatibility added for network states
+* resources/opennebula_virtual_machine: enable VM vcpu, cpu and memory update
 
 FEATURES:
 
diff --git a/opennebula/resource_opennebula_virtual_machine.go b/opennebula/resource_opennebula_virtual_machine.go
index 0837a59a6..864f51e24 100644
--- a/opennebula/resource_opennebula_virtual_machine.go
+++ b/opennebula/resource_opennebula_virtual_machine.go
@@ -1166,6 +1166,65 @@ func resourceOpennebulaVirtualMachineUpdateCustom(d *schema.ResourceData, meta i
 		}
 	}
 
+	if d.HasChange("cpu") || d.HasChange("vcpu") || d.HasChange("memory") {
+		timeout := time.Duration(d.Get("timeout").(int)) * time.Minute
+
+		vmState, _, _ := vmInfos.State()
+		vmRequireShutdown := vmState != vm.Poweroff && vmState != vm.Undeployed
+		if vmRequireShutdown {
+			if d.Get("hard_shutdown").(bool) {
+				err = vmc.PoweroffHard()
+			} else {
+				err = vmc.Poweroff()
+			}
+			if err != nil {
+				return fmt.Errorf(
+					"Poweroff for virtual machine (ID:%d) failed: %s", vmc.ID, err)
+			}
+			_, err = waitForVMState(vmc, timeout, "POWEROFF")
+			if err != nil {
+				return fmt.Errorf(
+					"waiting for virtual machine (ID:%d) to be in state %s: %s", vmc.ID, "POWEROFF", err)
+			}
+		}
+
+		resizeTpl := dyn.NewTemplate()
+		cpu := d.Get("cpu").(float64)
+		if cpu > 0 {
+			resizeTpl.AddPair("CPU", cpu)
+		}
+
+		vcpu := d.Get("vcpu").(int)
+		if vcpu > 0 {
+			resizeTpl.AddPair("VCPU", vcpu)
+		}
+
+		memory := d.Get("memory").(int)
+		if memory > 0 {
+			resizeTpl.AddPair("MEMORY", memory)
+		}
+
+		err = vmc.Resize(resizeTpl.String(), true)
+		if err != nil {
+			return fmt.Errorf(
+				"resizing for virtual machine (ID:%d) failed: %s", vmc.ID, err)
+		}
+
+		if vmRequireShutdown {
+			err = vmc.Resume()
+			if err != nil {
+				return fmt.Errorf(
+					"resume virtual machine (ID:%d) failed: %s", vmc.ID, err)
+			}
+			_, err = waitForVMState(vmc, timeout, "RUNNING")
+			if err != nil {
+				return fmt.Errorf(
+					"waiting for virtual machine (ID:%d) to be in state %s: %s", vmc.ID, "RUNNING", err)
+			}
+		}
+		log.Printf("[INFO] Successfully resized VM %s\n", vmInfos.Name)
+	}
+
 	if updateConf {
 		timeout := time.Duration(d.Get("timeout").(int)) * time.Minute
 
@@ -1548,8 +1607,14 @@ func resourceOpennebulaVirtualMachineDelete(d *schema.ResourceData, meta interfa
 			"waiting for virtual machine (ID:%d) to be in state %s: %s", vmc.ID, strings.Join(vmDeleteReadyStates, " "), err)
 	}
 
-	if err = vmc.TerminateHard(); err != nil {
-		return err
+	if d.Get("hard_shutdown").(bool) {
+		err = vmc.TerminateHard()
+	} else {
+		err = vmc.Terminate()
+	}
+	if err != nil {
+		return fmt.Errorf(
+			"Terminate VM (ID:%d) failed: %s", vmc.ID, err)
 	}
 
 	ret, err := waitForVMState(vmc, timeout, "DONE")
@@ -1567,9 +1632,14 @@ func resourceOpennebulaVirtualMachineDelete(d *schema.ResourceData, meta interfa
 
 		log.Printf("[INFO] retry terminate VM\n")
 
-		err := vmc.TerminateHard()
+		if d.Get("hard_shutdown").(bool) {
+			err = vmc.TerminateHard()
+		} else {
+			err = vmc.Terminate()
+		}
 		if err != nil {
-			return err
+			return fmt.Errorf(
+				"Terminate VM (ID:%d) failed: %s", vmc.ID, err)
 		}
 
 		_, err = waitForVMState(vmc, timeout, "DONE")
diff --git a/opennebula/resource_opennebula_virtual_machine_test.go b/opennebula/resource_opennebula_virtual_machine_test.go
index 977769840..ae53250e0 100644
--- a/opennebula/resource_opennebula_virtual_machine_test.go
+++ b/opennebula/resource_opennebula_virtual_machine_test.go
@@ -486,6 +486,64 @@ func TestAccVirtualMachinePending(t *testing.T) {
 	})
 }
 
+func TestAccVirtualMachineResize(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckVirtualMachineDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccVirtualMachineTemplateConfigBasic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "memory", "128"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "cpu", "0.1"),
+				),
+			},
+			{
+				Config: testAccVirtualMachineTemplateAddvCPU,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "vcpu", "1"),
+				),
+			},
+			{
+				Config: testAccVirtualMachineResizeCpu,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "cpu", "0.3"),
+				),
+			},
+			{
+				Config: testAccVirtualMachineResizevCpu,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "vcpu", "2"),
+				),
+			},
+			{
+				Config: testAccVirtualMachineResizeMemory,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "memory", "256"),
+				),
+			},
+			{
+				Config: testAccVirtualMachineResizePoweroffHard,
+				Check: resource.ComposeTestCheckFunc(
+					testAccSetDSdummy(),
+					resource.TestCheckResourceAttr("opennebula_virtual_machine.test", "name", "test-virtual_machine"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccVirtualMachineTemplateNIC(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
@@ -1746,3 +1804,202 @@ resource "opennebula_virtual_machine" "test" {
 	timeout = 5
 }
 `
+
+var testAccVirtualMachineTemplateAddvCPU = `
+resource "opennebula_virtual_machine" "test" {
+	name = "test-virtual_machine"
+	group = "oneadmin"
+	permissions = "642"
+	memory = 128
+	cpu = 0.1
+	vcpu = 1
+	description = "VM created for provider acceptance tests"
+
+	context = {
+		TESTVAR = "TEST"
+		NETWORK = "YES"
+		SET_HOSTNAME = "$NAME"
+	}
+
+	graphics {
+		type = "VNC"
+		listen = "0.0.0.0"
+		keymap = "en-us"
+	}
+
+	disk {}
+
+	os {
+		arch = "x86_64"
+		boot = ""
+	}
+
+	tags = {
+		env = "prod"
+		customer = "test"
+	}
+
+	sched_requirements = "FREE_CPU > 50"
+
+	timeout = 5
+}
+`
+
+var testAccVirtualMachineResizeCpu = `
+resource "opennebula_virtual_machine" "test" {
+	name = "test-virtual_machine"
+	group = "oneadmin"
+	permissions = "642"
+	memory = 128
+	cpu = 0.3
+	description = "VM created for provider acceptance tests"
+
+	context = {
+		TESTVAR = "TEST"
+		NETWORK = "YES"
+		SET_HOSTNAME = "$NAME"
+	}
+
+	graphics {
+		type = "VNC"
+		listen = "0.0.0.0"
+		keymap = "en-us"
+	}
+
+	disk {}
+
+	os {
+		arch = "x86_64"
+		boot = ""
+	}
+
+	tags = {
+		env = "prod"
+		customer = "test"
+	}
+
+	sched_requirements = "FREE_CPU > 50"
+
+	timeout = 5
+}
+`
+
+var testAccVirtualMachineResizevCpu = `
+resource "opennebula_virtual_machine" "test" {
+	name = "test-virtual_machine"
+	group = "oneadmin"
+	permissions = "642"
+	memory = 128
+	cpu = 0.3
+	vcpu = 2
+	description = "VM created for provider acceptance tests"
+
+	context = {
+		TESTVAR = "TEST"
+		NETWORK = "YES"
+		SET_HOSTNAME = "$NAME"
+	}
+
+	graphics {
+		type = "VNC"
+		listen = "0.0.0.0"
+		keymap = "en-us"
+	}
+
+	disk {}
+
+	os {
+		arch = "x86_64"
+		boot = ""
+	}
+
+	tags = {
+		env = "prod"
+		customer = "test"
+	}
+
+	sched_requirements = "FREE_CPU > 50"
+
+	timeout = 5
+}
+`
+
+var testAccVirtualMachineResizeMemory = `
+resource "opennebula_virtual_machine" "test" {
+	name = "test-virtual_machine"
+	group = "oneadmin"
+	permissions = "642"
+	memory = 256
+	cpu = 0.3
+	vcpu = 2
+	description = "VM created for provider acceptance tests"
+	context = {
+		TESTVAR = "TEST"
+		NETWORK = "YES"
+		SET_HOSTNAME = "$NAME"
+	}
+
+	graphics {
+		type = "VNC"
+		listen = "0.0.0.0"
+		keymap = "en-us"
+	}
+
+	disk {}
+
+	os {
+		arch = "x86_64"
+		boot = ""
+	}
+
+	tags = {
+		env = "prod"
+		customer = "test"
+	}
+
+	sched_requirements = "FREE_CPU > 50"
+
+	timeout = 5
+}
+`
+
+var testAccVirtualMachineResizePoweroffHard = `
+resource "opennebula_virtual_machine" "test" {
+	name = "test-virtual_machine"
+	group = "oneadmin"
+	permissions = "642"
+	memory = 256
+	cpu = 0.3
+	vcpu = 2
+	hard_shutdown = true
+	description = "VM created for provider acceptance tests"
+
+	context = {
+		TESTVAR = "TEST"
+		NETWORK = "YES"
+		SET_HOSTNAME = "$NAME"
+	}
+
+	graphics {
+		type = "VNC"
+		listen = "0.0.0.0"
+		keymap = "en-us"
+	}
+
+	disk {}
+
+	os {
+		arch = "x86_64"
+		boot = ""
+	}
+
+	tags = {
+		env = "prod"
+		customer = "test"
+	}
+
+	sched_requirements = "FREE_CPU > 50"
+
+	timeout = 5
+}
+`
diff --git a/opennebula/shared_schemas.go b/opennebula/shared_schemas.go
index f9f6a04a9..d7574a74f 100644
--- a/opennebula/shared_schemas.go
+++ b/opennebula/shared_schemas.go
@@ -61,6 +61,12 @@ func commonVMSchemas() map[string]*schema.Schema {
 			},
 			"template_disk": templateDiskVMSchema(),
 			"disk":          diskVMSchema(),
+			"hard_shutdown": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Immediately poweroff/terminate/reboot/undeploy the VM. (default: false)",
+			},
 		},
 	)
 }
diff --git a/website/docs/r/virtual_machine.html.markdown b/website/docs/r/virtual_machine.html.markdown
index df29c68b7..b0e7671b6 100644
--- a/website/docs/r/virtual_machine.html.markdown
+++ b/website/docs/r/virtual_machine.html.markdown
@@ -100,6 +100,7 @@ The following arguments are supported:
 * `timeout` - (Optional) Timeout (in Minutes) for VM availability. Defaults to 3 minutes.
 * `lock` - (Optional) Lock the VM with a specific lock level. Supported values: `USE`, `MANAGE`, `ADMIN`, `ALL` or `UNLOCK`.
 * `on_disk_change` - (Optional) Select the behavior for changing disk images. Supported values: `RECREATE` or `SWAP` (default). `RECREATE` forces recreation of the vm and `SWAP` adopts the standard behavior of hot-swapping the disks. NOTE: This property does not affect the behavior of adding new disks.
+* `hard_shutdown` - (Optional) Immediately poweroff/terminate/reboot/undeploy the VM (useful if the VM doesn't have ACPI support). Defaults to `false`.
 
 ### Graphics parameters