diff --git a/docs/data-sources/vm.md b/docs/data-sources/vm.md
index c535128..1546454 100644
--- a/docs/data-sources/vm.md
+++ b/docs/data-sources/vm.md
@@ -52,6 +52,7 @@ Read-Only:
- `disks` (Attributes List) List of disks (see [below for nested schema](#nestedatt--vms--disks))
- `name` (String)
- `power_state` (String)
+- `snapshot_schedule_uuid` (String) UUID of the applied snapshot schedule for creating automated snapshots
- `uuid` (String)
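A minimal sketch of reading the new attribute (the VM name is hypothetical; the `vms.0` indexing follows the examples below):

```terraform
data "hypercore_vm" "clone_source_vm" {
  name = "source-vm" # hypothetical VM name
}

# Expose the snapshot schedule UUID HC3 reports for this VM
output "source_vm_snapshot_schedule_uuid" {
  value = data.hypercore_vm.clone_source_vm.vms.0.snapshot_schedule_uuid
}
```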
diff --git a/docs/resources/vm.md b/docs/resources/vm.md
index 8f62f3e..53528ba 100644
--- a/docs/resources/vm.md
+++ b/docs/resources/vm.md
@@ -28,8 +28,9 @@ resource "hypercore_vm" "myvm" {
name = local.vm_name
description = "some description"
- vcpu = 4
- memory = 4096 # MiB
+ vcpu = 4
+ memory = 4096 # MiB
+ snapshot_schedule_uuid = data.hypercore_vm.clone_source_vm.vms.0.snapshot_schedule_uuid
clone = {
source_vm_uuid = data.hypercore_vm.clone_source_vm.vms.0.uuid
@@ -63,6 +64,7 @@ output "vm_uuid" {
- `description` (String) Description of this VM
- `group` (String) Group/tag to create this VM in
- `memory` (Number) Memory (RAM) size in `MiB`: If the cloned VM was already created
and its memory was modified, the cloned VM will be rebooted (either gracefully or forcefully)
+- `snapshot_schedule_uuid` (String) UUID of the snapshot schedule used to create automated snapshots
- `vcpu` (Number) Number of CPUs on this VM. If the cloned VM was already created and its
`VCPU` was modified, the cloned VM will be rebooted (either gracefully or forcefully)
### Read-Only
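The new optional attribute composes directly with the `hypercore_vm_snapshot_schedule` resource added in this change; a minimal sketch, assuming an empty clone source as in the examples below:

```terraform
resource "hypercore_vm_snapshot_schedule" "schedule" {
  name = "my-schedule"
}

resource "hypercore_vm" "scheduled_vm" {
  name                   = "scheduled-vm"
  vcpu                   = 4
  memory                 = 4096 # MiB
  snapshot_schedule_uuid = hypercore_vm_snapshot_schedule.schedule.id

  clone = {
    source_vm_uuid = ""
    meta_data      = ""
    user_data      = ""
  }
}
```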
diff --git a/docs/resources/vm_snapshot.md b/docs/resources/vm_snapshot.md
new file mode 100644
index 0000000..d9e9731
--- /dev/null
+++ b/docs/resources/vm_snapshot.md
@@ -0,0 +1,58 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "hypercore_vm_snapshot Resource - hypercore"
+subcategory: ""
+description: |-
+ Hypercore VM snapshot resource to manage VM snapshots
+---
+
+# hypercore_vm_snapshot (Resource)
+
+Hypercore VM snapshot resource to manage VM snapshots
+
+## Example Usage
+
+```terraform
+locals {
+ vm_name = "example-vm-one"
+ another_vm_name = "example-vm-two"
+}
+
+data "hypercore_vm" "example-vm-one" {
+ name = local.vm_name
+}
+
+data "hypercore_vm" "example-vm-two" {
+ name = local.another_vm_name
+}
+
+resource "hypercore_vm_snapshot" "snapshot" {
+ vm_uuid = data.hypercore_vm.example-vm-one.vms.0.uuid
+ label = "my-snapshot"
+}
+
+resource "hypercore_vm_snapshot" "imported-snapshot" {
+ vm_uuid = data.hypercore_vm.example-vm-two.vms.0.uuid
+}
+
+import {
+ to = hypercore_vm_snapshot.imported-snapshot
+ id = "24ab2255-ca77-49ec-bc96-f469cec3affb"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `vm_uuid` (String) UUID of the VM to snapshot.
+
+### Optional
+
+- `label` (String) Snapshot label.
+
+### Read-Only
+
+- `id` (String) VM snapshot identifier
+- `type` (String) Snapshot type. Can be: USER, AUTOMATED, SUPPORT
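The read-only attributes can be referenced like any other; a minimal sketch building on the example usage above:

```terraform
output "snapshot_id" {
  value = hypercore_vm_snapshot.snapshot.id
}

output "snapshot_type" {
  value = hypercore_vm_snapshot.snapshot.type # USER, AUTOMATED or SUPPORT
}
```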
diff --git a/docs/resources/vm_snapshot_schedule.md b/docs/resources/vm_snapshot_schedule.md
new file mode 100644
index 0000000..b0d99e4
--- /dev/null
+++ b/docs/resources/vm_snapshot_schedule.md
@@ -0,0 +1,88 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "hypercore_vm_snapshot_schedule Resource - hypercore"
+subcategory: ""
+description: |-
+  Hypercore VM snapshot schedule resource to manage VM snapshot schedules
+---
+
+# hypercore_vm_snapshot_schedule (Resource)
+
+Hypercore VM snapshot schedule resource to manage VM snapshot schedules
+
+## Example Usage
+
+```terraform
+locals {
+ vm_name = "example-vm-one"
+ another_vm_name = "example-vm-two"
+}
+
+data "hypercore_vm" "example-vm-one" {
+ name = local.vm_name
+}
+
+data "hypercore_vm" "example-vm-two" {
+ name = local.another_vm_name
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule" {
+ name = "my-schedule"
+ rules = [
+ {
+ name = "first-example-rule",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ },
+ {
+ name = "second-example-rule",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ }
+ ]
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule-no-rules" {
+ name = "my-schedule-without-rules"
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule-imported" {
+ name = "my-imported-schedule"
+}
+
+import {
+ to = hypercore_vm_snapshot_schedule.example-schedule-imported
+ id = "69b21f14-6bb6-4dd5-a6bc-6dec9bd59c96"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `name` (String) Snapshot schedule name.
+
+### Optional
+
+- `rules` (Attributes List) Scheduled snapshot rules. (see [below for nested schema](#nestedatt--rules))
+
+### Read-Only
+
+- `id` (String) Snapshot schedule identifier
+
+
+### Nested Schema for `rules`
+
+Required:
+
+- `frequency` (String) Frequency based on RFC-2445 (FREQ=MINUTELY;INTERVAL=5)
+- `local_retention_seconds` (Number) Number of seconds before snapshots are removed
+- `name` (String) Rule name
+- `start_timestamp` (String) Local timezone timestamp (2010-01-01 00:00:00) of when a snapshot is to be taken
+
+Optional:
+
+- `remote_retention_seconds` (Number) Number of seconds before snapshots are removed. If not set, it'll be the same as `local_retention_seconds`
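`remote_retention_seconds` is the one rule attribute the example usage above does not exercise; a hypothetical rule showing it (the weekly frequency and retention values are illustrative):

```terraform
resource "hypercore_vm_snapshot_schedule" "weekly" {
  name = "weekly-schedule"
  rules = [
    {
      name                     = "weekly-rule",
      start_timestamp          = "2023-02-01 00:00:00",
      frequency                = "FREQ=WEEKLY;INTERVAL=1",
      local_retention_seconds  = 604800,  # keep local snapshots for 7 days
      remote_retention_seconds = 1209600  # keep replicated snapshots for 14 days
    }
  ]
}
```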
diff --git a/examples/resources/hypercore_vm/resource.tf b/examples/resources/hypercore_vm/resource.tf
index 41c4b34..422596d 100644
--- a/examples/resources/hypercore_vm/resource.tf
+++ b/examples/resources/hypercore_vm/resource.tf
@@ -13,8 +13,9 @@ resource "hypercore_vm" "myvm" {
name = local.vm_name
description = "some description"
- vcpu = 4
- memory = 4096 # MiB
+ vcpu = 4
+ memory = 4096 # MiB
+ snapshot_schedule_uuid = data.hypercore_vm.clone_source_vm.vms.0.snapshot_schedule_uuid
clone = {
source_vm_uuid = data.hypercore_vm.clone_source_vm.vms.0.uuid
diff --git a/examples/resources/hypercore_vm_snapshot/resource.tf b/examples/resources/hypercore_vm_snapshot/resource.tf
new file mode 100644
index 0000000..3b2b95b
--- /dev/null
+++ b/examples/resources/hypercore_vm_snapshot/resource.tf
@@ -0,0 +1,26 @@
+locals {
+ vm_name = "example-vm-one"
+ another_vm_name = "example-vm-two"
+}
+
+data "hypercore_vm" "example-vm-one" {
+ name = local.vm_name
+}
+
+data "hypercore_vm" "example-vm-two" {
+ name = local.another_vm_name
+}
+
+resource "hypercore_vm_snapshot" "snapshot" {
+ vm_uuid = data.hypercore_vm.example-vm-one.vms.0.uuid
+ label = "my-snapshot"
+}
+
+resource "hypercore_vm_snapshot" "imported-snapshot" {
+ vm_uuid = data.hypercore_vm.example-vm-two.vms.0.uuid
+}
+
+import {
+ to = hypercore_vm_snapshot.imported-snapshot
+ id = "24ab2255-ca77-49ec-bc96-f469cec3affb"
+}
diff --git a/examples/resources/hypercore_vm_snapshot_schedule/resource.tf b/examples/resources/hypercore_vm_snapshot_schedule/resource.tf
new file mode 100644
index 0000000..4fc0176
--- /dev/null
+++ b/examples/resources/hypercore_vm_snapshot_schedule/resource.tf
@@ -0,0 +1,43 @@
+locals {
+ vm_name = "example-vm-one"
+ another_vm_name = "example-vm-two"
+}
+
+data "hypercore_vm" "example-vm-one" {
+ name = local.vm_name
+}
+
+data "hypercore_vm" "example-vm-two" {
+ name = local.another_vm_name
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule" {
+ name = "my-schedule"
+ rules = [
+ {
+ name = "first-example-rule",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ },
+ {
+ name = "second-example-rule",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ }
+ ]
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule-no-rules" {
+ name = "my-schedule-without-rules"
+}
+
+resource "hypercore_vm_snapshot_schedule" "example-schedule-imported" {
+ name = "my-imported-schedule"
+}
+
+import {
+ to = hypercore_vm_snapshot_schedule.example-schedule-imported
+ id = "69b21f14-6bb6-4dd5-a6bc-6dec9bd59c96"
+}
diff --git a/internal/provider/hypercore_vm_data_source.go b/internal/provider/hypercore_vm_data_source.go
index de790cf..ca69b2e 100644
--- a/internal/provider/hypercore_vm_data_source.go
+++ b/internal/provider/hypercore_vm_data_source.go
@@ -43,14 +43,15 @@ type hypercoreVMsDataSourceModel struct {
// hypercoreVMModel maps VM schema data.
type hypercoreVMModel struct {
- UUID types.String `tfsdk:"uuid"`
- Name types.String `tfsdk:"name"`
- Description types.String `tfsdk:"description"`
- PowerState types.String `tfsdk:"power_state"`
- VCPU types.Int32 `tfsdk:"vcpu"`
- Memory types.Int64 `tfsdk:"memory"`
- Tags []types.String `tfsdk:"tags"`
- Disks []HypercoreDiskModel `tfsdk:"disks"`
+ UUID types.String `tfsdk:"uuid"`
+ Name types.String `tfsdk:"name"`
+ Description types.String `tfsdk:"description"`
+ PowerState types.String `tfsdk:"power_state"`
+ VCPU types.Int32 `tfsdk:"vcpu"`
+ Memory types.Int64 `tfsdk:"memory"`
+ SnapshotScheduleUUID types.String `tfsdk:"snapshot_schedule_uuid"`
+ Tags []types.String `tfsdk:"tags"`
+ Disks []HypercoreDiskModel `tfsdk:"disks"`
// TODO nics
AffinityStrategy AffinityStrategyModel `tfsdk:"affinity_strategy"`
}
@@ -92,6 +93,10 @@ func (d *hypercoreVMDataSource) Schema(_ context.Context, _ datasource.SchemaReq
MarkdownDescription: "Memory (RAM) size in MiB",
Optional: true,
},
+ "snapshot_schedule_uuid": schema.StringAttribute{
+ MarkdownDescription: "UUID of the applied snapshot schedule for creating automated snapshots",
+ Computed: true,
+ },
"description": schema.StringAttribute{
Computed: true,
},
@@ -226,15 +231,16 @@ func (d *hypercoreVMDataSource) Read(ctx context.Context, req datasource.ReadReq
memory_B := utils.AnyToInteger64(vm["mem"])
memory_MiB := memory_B / 1024 / 1024
hypercoreVMState := hypercoreVMModel{
- UUID: types.StringValue(utils.AnyToString(vm["uuid"])),
- Name: types.StringValue(utils.AnyToString(vm["name"])),
- VCPU: types.Int32Value(int32(utils.AnyToInteger64(vm["numVCPU"]))),
- Memory: types.Int64Value(memory_MiB),
- Description: types.StringValue(utils.AnyToString(vm["description"])),
- PowerState: types.StringValue(utils.AnyToString(vm["state"])), // TODO convert (stopped vs SHUTOFF)
- Tags: tags_String,
- AffinityStrategy: affinityStrategy,
- Disks: disks,
+ UUID: types.StringValue(utils.AnyToString(vm["uuid"])),
+ Name: types.StringValue(utils.AnyToString(vm["name"])),
+ VCPU: types.Int32Value(int32(utils.AnyToInteger64(vm["numVCPU"]))),
+ Memory: types.Int64Value(memory_MiB),
+ SnapshotScheduleUUID: types.StringValue(utils.AnyToString(vm["snapshotScheduleUUID"])),
+ Description: types.StringValue(utils.AnyToString(vm["description"])),
+ PowerState: types.StringValue(utils.AnyToString(vm["state"])), // TODO convert (stopped vs SHUTOFF)
+ Tags: tags_String,
+ AffinityStrategy: affinityStrategy,
+ Disks: disks,
}
state.Vms = append(state.Vms, hypercoreVMState)
}
diff --git a/internal/provider/hypercore_vm_resource.go b/internal/provider/hypercore_vm_resource.go
index 64a8811..13fc534 100644
--- a/internal/provider/hypercore_vm_resource.go
+++ b/internal/provider/hypercore_vm_resource.go
@@ -34,14 +34,15 @@ type HypercoreVMResource struct {
// HypercoreVMResourceModel describes the resource data model.
type HypercoreVMResourceModel struct {
- Group types.String `tfsdk:"group"`
- Name types.String `tfsdk:"name"`
- Description types.String `tfsdk:"description"`
- VCPU types.Int32 `tfsdk:"vcpu"`
- Memory types.Int64 `tfsdk:"memory"`
- Clone CloneModel `tfsdk:"clone"`
- AffinityStrategy AffinityStrategyModel `tfsdk:"affinity_strategy"`
- Id types.String `tfsdk:"id"`
+ Group types.String `tfsdk:"group"`
+ Name types.String `tfsdk:"name"`
+ Description types.String `tfsdk:"description"`
+ VCPU types.Int32 `tfsdk:"vcpu"`
+ Memory types.Int64 `tfsdk:"memory"`
+ SnapshotScheduleUUID types.String `tfsdk:"snapshot_schedule_uuid"`
+ Clone CloneModel `tfsdk:"clone"`
+ AffinityStrategy AffinityStrategyModel `tfsdk:"affinity_strategy"`
+ Id types.String `tfsdk:"id"`
}
type CloneModel struct {
@@ -90,6 +91,10 @@ func (r *HypercoreVMResource) Schema(ctx context.Context, req resource.SchemaReq
"and it's memory was modified, the cloned VM will be rebooted (either gracefully or forcefully)",
Optional: true,
},
+ "snapshot_schedule_uuid": schema.StringAttribute{
+ MarkdownDescription: "UUID of the snapshot schedule to create automatic snapshots",
+ Optional: true,
+ },
"clone": schema.ObjectAttribute{
MarkdownDescription: "" +
"Clone options if the VM is being created as a clone. The `source_vm_uuid` is the UUID of the VM used for cloning,
" +
@@ -207,6 +212,7 @@ func (r *HypercoreVMResource) Create(ctx context.Context, req resource.CreateReq
tags,
data.VCPU.ValueInt32Pointer(),
data.Memory.ValueInt64Pointer(),
+ data.SnapshotScheduleUUID.ValueStringPointer(),
nil,
data.AffinityStrategy.StrictAffinity.ValueBool(),
data.AffinityStrategy.PreferredNodeUUID.ValueString(),
@@ -278,6 +284,7 @@ func (r *HypercoreVMResource) Read(ctx context.Context, req resource.ReadRequest
// uiState TODO
data.VCPU = types.Int32Value(int32(utils.AnyToInteger64(hc3_vm["numVCPU"])))
data.Memory = types.Int64Value(utils.AnyToInteger64(hc3_vm["mem"]) / 1024 / 1024)
+ data.SnapshotScheduleUUID = types.StringValue(utils.AnyToString(hc3_vm["snapshotScheduleUUID"]))
affinityStrategy := utils.AnyToMap(hc3_vm["affinityStrategy"])
data.AffinityStrategy.StrictAffinity = types.BoolValue(utils.AnyToBool(affinityStrategy["strictAffinity"]))
@@ -335,6 +342,9 @@ func (r *HypercoreVMResource) Update(ctx context.Context, req resource.UpdateReq
if data_state.VCPU != data.VCPU {
updatePayload["numVCPU"] = data.VCPU.ValueInt32()
}
+ if data_state.SnapshotScheduleUUID != data.SnapshotScheduleUUID {
+ updatePayload["snapshotScheduleUUID"] = data.SnapshotScheduleUUID.ValueString()
+ }
affinityStrategy := map[string]any{}
if data_state.AffinityStrategy.StrictAffinity != data.AffinityStrategy.StrictAffinity {
diff --git a/internal/provider/hypercore_vm_resource_test.go b/internal/provider/hypercore_vm_resource_test.go
index 99a9b1b..b365242 100644
--- a/internal/provider/hypercore_vm_resource_test.go
+++ b/internal/provider/hypercore_vm_resource_test.go
@@ -87,6 +87,7 @@ resource "hypercore_vm" "test" {
vcpu = 4
memory = 4096
description = "testtf-vm-description"
+ snapshot_schedule_uuid = ""
// power_state = %[3]q
clone = {
source_vm_uuid = %[2]q
diff --git a/internal/provider/hypercore_vm_snapshot.go b/internal/provider/hypercore_vm_snapshot.go
new file mode 100644
index 0000000..9c79acd
--- /dev/null
+++ b/internal/provider/hypercore_vm_snapshot.go
@@ -0,0 +1,271 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package provider
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/hashicorp/terraform-provider-hypercore/internal/utils"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ resource.Resource = &HypercoreVMSnapshotResource{}
+var _ resource.ResourceWithImportState = &HypercoreVMSnapshotResource{}
+
+func NewHypercoreVMSnapshotResource() resource.Resource {
+ return &HypercoreVMSnapshotResource{}
+}
+
+// HypercoreVMSnapshotResource defines the resource implementation.
+type HypercoreVMSnapshotResource struct {
+ client *utils.RestClient
+}
+
+// HypercoreVMSnapshotResourceModel describes the resource data model.
+type HypercoreVMSnapshotResourceModel struct {
+ Id types.String `tfsdk:"id"`
+ VmUUID types.String `tfsdk:"vm_uuid"`
+ Type types.String `tfsdk:"type"`
+ Label types.String `tfsdk:"label"`
+}
+
+func (r *HypercoreVMSnapshotResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_vm_snapshot"
+}
+
+func (r *HypercoreVMSnapshotResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Hypercore VM snapshot resource to manage VM snapshots",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "VM snapshot identifier",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "vm_uuid": schema.StringAttribute{
+ MarkdownDescription: "VM UUID of which we want to create a snapshot.",
+ Required: true,
+ },
+ "type": schema.StringAttribute{
+ MarkdownDescription: "Snapshot type. Can be: USER, AUTOMATED, SUPPORT",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "label": schema.StringAttribute{
+ MarkdownDescription: "Snapshot label.",
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func (r *HypercoreVMSnapshotResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource CONFIGURE")
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ restClient, ok := req.ProviderData.(*utils.RestClient)
+
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+
+ return
+ }
+
+ r.client = restClient
+}
+
+func (r *HypercoreVMSnapshotResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource CREATE")
+ var data HypercoreVMSnapshotResourceModel
+
+ // Read Terraform plan data into the model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+
+ if r.client == nil {
+ resp.Diagnostics.AddError(
+ "Unconfigured HTTP Client",
+ "Expected configured HTTP client. Please report this issue to the provider developers.",
+ )
+ return
+ }
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ restClient := *r.client
+ vmUUID := data.VmUUID.ValueString()
+ snapLabel := data.Label.ValueString()
+ snapType := "USER"
+
+ if snapLabel == "" || data.Label.IsUnknown() || data.Label.IsNull() {
+ resp.Diagnostics.AddError(
+ "Missing 'label' parameter",
+ "Snapshots must be labeled",
+ )
+ return
+ }
+
+ // Create VM snapshot
+ payload := map[string]any{
+ "domainUUID": vmUUID,
+ "label": snapLabel,
+ "type": snapType,
+
+ // These are HC3 API defaults; the API requires
+ // them to be present in the payload
+ "automatedTriggerTimestamp": 0,
+ "localRetainUntilTimestamp": 0,
+ "remoteRetainUntilTimestamp": 0,
+ "blockCountDiffFromSerialNumber": -1,
+ "replication": true,
+ }
+ snapUUID, snap, _diag := utils.CreateVMSnapshot(restClient, vmUUID, payload, ctx)
+ if _diag != nil {
+ resp.Diagnostics.AddWarning(_diag.Summary(), _diag.Detail())
+ }
+ tflog.Info(ctx, fmt.Sprintf("TTRT Created: vm_uuid=%s, label=%s, type=%s, snap=%v", vmUUID, snapLabel, snapType, snap))
+
+ // TODO: Check if HC3 matches TF
+ // save into the Terraform state.
+ data.Id = types.StringValue(snapUUID)
+ data.Type = types.StringValue(snapType)
+
+ // Write logs using the tflog package
+ // Documentation: https://terraform.io/plugin/log
+ tflog.Trace(ctx, "Created a VM snapshot")
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource READ")
+ var data HypercoreVMSnapshotResourceModel
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Snapshot read ======================================================================
+ restClient := *r.client
+ snapUUID := data.Id.ValueString()
+ tflog.Debug(ctx, fmt.Sprintf("TTRT HypercoreSnapshot Read oldState snapUUID=%s\n", snapUUID))
+
+ pHc3Snap := utils.GetVMSnapshotByUUID(restClient, snapUUID)
+ if pHc3Snap == nil {
+ resp.Diagnostics.AddError("Snapshot not found", fmt.Sprintf("Snapshot not found - snapUUID=%s", snapUUID))
+ return
+ }
+ hc3Snap := *pHc3Snap
+
+ tflog.Info(ctx, fmt.Sprintf("TTRT HypercoreSnapshot: snap_uuid=%s, vm_uuid=%s, label=%s, type=%s\n", snapUUID, data.VmUUID.ValueString(), data.Label.ValueString(), data.Type.ValueString()))
+
+ vmUUID := utils.AnyToString(hc3Snap["domainUUID"])
+ snapLabel := utils.AnyToString(hc3Snap["label"])
+ snapType := utils.AnyToString(hc3Snap["type"])
+
+ // save into the Terraform state.
+ data.Id = types.StringValue(snapUUID)
+ data.VmUUID = types.StringValue(vmUUID)
+ data.Label = types.StringValue(snapLabel)
+ data.Type = types.StringValue(snapType)
+
+ // Save updated data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ // NOTE: /rest/v1/VirDomainSnapshot has no update endpoints, so update is not needed here
+
+ // tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource UPDATE")
+ // var data_state HypercoreVMSnapshotResourceModel
+ // resp.Diagnostics.Append(req.State.Get(ctx, &data_state)...)
+ // var data HypercoreVMSnapshotResourceModel
+ //
+ // // Read Terraform plan data into the model
+ // resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+ //
+ // if resp.Diagnostics.HasError() {
+ // return
+ // }
+ //
+ // // Save updated data into Terraform state
+ // resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource DELETE")
+ var data HypercoreVMSnapshotResourceModel
+
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // If applicable, this is a great opportunity to initialize any necessary
+ // provider client data and make a call using it.
+ // httpResp, err := r.client.Do(httpReq)
+ // if err != nil {
+ // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete example, got error: %s", err))
+ // return
+ // }
+
+ restClient := *r.client
+ snapUUID := data.Id.ValueString()
+ taskTag := restClient.DeleteRecord(
+ fmt.Sprintf("/rest/v1/VirDomainSnapshot/%s", snapUUID),
+ -1,
+ ctx,
+ )
+ taskTag.WaitTask(restClient, ctx)
+}
+
+func (r *HypercoreVMSnapshotResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotResource IMPORT_STATE")
+
+ snapUUID := req.ID
+ tflog.Info(ctx, fmt.Sprintf("TTRT HypercoreVMSnapshotResource: snapUUID=%s", snapUUID))
+
+ restClient := *r.client
+ hc3Snapshot := utils.GetVMSnapshotByUUID(restClient, snapUUID)
+
+ if hc3Snapshot == nil {
+ msg := fmt.Sprintf("VM Snapshot import, snapshot not found - 'snap_uuid'='%s'.", req.ID)
+ resp.Diagnostics.AddError("VM Snapshot import error, snapshot not found", msg)
+ return
+ }
+
+ snapType := utils.AnyToString((*hc3Snapshot)["type"])
+ snapLabel := utils.AnyToString((*hc3Snapshot)["label"])
+ vmUUID := utils.AnyToString((*hc3Snapshot)["domainUUID"])
+
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), snapUUID)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("vm_uuid"), vmUUID)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("type"), snapType)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("label"), snapLabel)...)
+}
diff --git a/internal/provider/hypercore_vm_snapshot_schedule.go b/internal/provider/hypercore_vm_snapshot_schedule.go
new file mode 100644
index 0000000..6ff17d2
--- /dev/null
+++ b/internal/provider/hypercore_vm_snapshot_schedule.go
@@ -0,0 +1,509 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package provider
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/hashicorp/terraform-provider-hypercore/internal/utils"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ resource.Resource = &HypercoreVMSnapshotScheduleResource{}
+var _ resource.ResourceWithImportState = &HypercoreVMSnapshotScheduleResource{}
+
+func NewHypercoreVMSnapshotScheduleResource() resource.Resource {
+ return &HypercoreVMSnapshotScheduleResource{}
+}
+
+// HypercoreVMSnapshotScheduleResource defines the resource implementation.
+type HypercoreVMSnapshotScheduleResource struct {
+ client *utils.RestClient
+}
+
+// HypercoreVMSnapshotScheduleResourceModel describes the resource data model.
+type HypercoreVMSnapshotScheduleResourceModel struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ Rules types.List `tfsdk:"rules"`
+}
+
+type RulesModel struct {
+ Name types.String `tfsdk:"name"`
+ StartTimestamp types.String `tfsdk:"start_timestamp"`
+ Frequency types.String `tfsdk:"frequency"`
+ LocalRetentionSeconds types.Int64 `tfsdk:"local_retention_seconds"`
+ RemoteRetentionSeconds types.Int64 `tfsdk:"remote_retention_seconds"`
+}
+
+var rulesModelAttrType = map[string]attr.Type{
+ "name": types.StringType,
+ "start_timestamp": types.StringType,
+ "frequency": types.StringType,
+ "local_retention_seconds": types.Int64Type,
+ "remote_retention_seconds": types.Int64Type,
+}
+
+func GetRulesAttrValues(rules []RulesModel) ([]attr.Value, diag.Diagnostics) {
+ var ruleValues []attr.Value
+ for _, rule := range rules {
+ ruleMap := map[string]attr.Value{
+ "name": rule.Name,
+ "start_timestamp": rule.StartTimestamp,
+ "frequency": rule.Frequency,
+ "local_retention_seconds": rule.LocalRetentionSeconds,
+ "remote_retention_seconds": rule.RemoteRetentionSeconds,
+ }
+ obj, diags := types.ObjectValue(rulesModelAttrType, ruleMap)
+ if diags.HasError() {
+ return nil, diags
+ }
+ ruleValues = append(ruleValues, obj)
+ }
+ return ruleValues, nil
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_vm_snapshot_schedule"
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Hypercore VM snapshot schedule resource to manage VM snapshots",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Snapshot schedule identifier",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Snapshot schedule name.",
+ Required: true,
+ },
+ "rules": schema.ListNestedAttribute{
+ MarkdownDescription: "Scheduled snapshot rules.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.List{
+ listplanmodifier.UseStateForUnknown(),
+ },
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Rule name",
+ Required: true,
+ },
+ "start_timestamp": schema.StringAttribute{
+ MarkdownDescription: "Local timezone timestamp (2010-01-01 00:00:00) of when a snapshot is to be taken",
+ Required: true,
+ },
+ "frequency": schema.StringAttribute{
+ MarkdownDescription: "Frequency based on RFC-2445 (FREQ=MINUTELY;INTERVAL=5)",
+ Required: true,
+ },
+ "local_retention_seconds": schema.Int64Attribute{
+ MarkdownDescription: "Number of seconds before snapshots are removed",
+ Required: true,
+ },
+ "remote_retention_seconds": schema.Int64Attribute{
+ MarkdownDescription: "Number of seconds before snapshots are removed. If not set, it'll be the same as `local_retention_seconds`",
+ Optional: true,
+ Computed: true,
+ Default: int64default.StaticInt64(0),
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource CONFIGURE")
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ restClient, ok := req.ProviderData.(*utils.RestClient)
+
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+
+ return
+ }
+
+ r.client = restClient
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource CREATE")
+ var data HypercoreVMSnapshotScheduleResourceModel
+
+ // Read Terraform plan data into the model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+
+ if r.client == nil {
+ resp.Diagnostics.AddError(
+ "Unconfigured HTTP Client",
+ "Expected configured HTTP client. Please report this issue to the provider developers.",
+ )
+ return
+ }
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ restClient := *r.client
+ scheduleName := data.Name.ValueString()
+
+ var scheduleRules []RulesModel
+ if data.Rules.IsUnknown() {
+ scheduleRules = []RulesModel{}
+ } else {
+ if len(data.Rules.Elements()) != 0 {
+ scheduleRules = make([]RulesModel, len(data.Rules.Elements()))
+ diags := data.Rules.ElementsAs(ctx, &scheduleRules, false)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ }
+ }
+
+ var payloadScheduleRules []map[string]any
+ if len(scheduleRules) != 0 {
+ for _, scheduleRule := range scheduleRules {
+ payloadScheduleRules = append(payloadScheduleRules, map[string]any{
+ "dtstart": scheduleRule.StartTimestamp.ValueString(),
+ "rrule": scheduleRule.Frequency.ValueString(),
+ "name": scheduleRule.Name.ValueString(),
+ "localRetentionDurationSeconds": scheduleRule.LocalRetentionSeconds.ValueInt64(),
+ "remoteRetentionDurationSeconds": scheduleRule.RemoteRetentionSeconds.ValueInt64(),
+ })
+ }
+ } else {
+ payloadScheduleRules = []map[string]any{} // empty list
+ }
+
+ tflog.Info(ctx, fmt.Sprintf("TTRT Create: scheduleRules = %v", scheduleRules))
+
+ // Create schedule
+ payload := map[string]any{
+ "name": scheduleName,
+ "rrules": payloadScheduleRules,
+ }
+ scheduleUUID, schedule, _diag := utils.CreateVMSnapshotSchedule(restClient, payload, ctx)
+ if _diag != nil {
+ resp.Diagnostics.AddWarning(_diag.Summary(), _diag.Detail())
+ }
+
+ tflog.Info(ctx, fmt.Sprintf("TTRT Created: schedule_uuid=%s, name=%s, rules=%v, schedule=%s", scheduleUUID, scheduleName, scheduleRules, schedule))
+
+ // TODO: Check if HC3 matches TF
+
+ // Retrieve rules data
+ var ruleValues []attr.Value
+ var _diags diag.Diagnostics
+ if schedule["rrules"] != nil {
+ hc3Rules := utils.AnyToListOfMap(schedule["rrules"])
+ for i := range hc3Rules {
+ if scheduleRules[i].RemoteRetentionSeconds.IsUnknown() {
+ scheduleRules[i].RemoteRetentionSeconds = types.Int64Value(0)
+ }
+ }
+
+ ruleValues, _diags = GetRulesAttrValues(scheduleRules)
+ if _diags != nil {
+ resp.Diagnostics.Append(_diags...)
+ return
+ }
+ } else {
+ ruleValues = []attr.Value{} // make it an empty list
+ }
+
+ data.Rules, _diags = types.ListValue(
+ types.ObjectType{AttrTypes: rulesModelAttrType},
+ ruleValues,
+ )
+ if _diags.HasError() {
+ resp.Diagnostics.Append(_diags...)
+ return
+ }
+
+ // save into the Terraform state.
+ data.Id = types.StringValue(scheduleUUID)
+
+ // Write logs using the tflog package
+ // Documentation: https://terraform.io/plugin/log
+ tflog.Trace(ctx, "Created a schedule")
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource READ")
+ var data HypercoreVMSnapshotScheduleResourceModel
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Schedule read ======================================================================
+ restClient := *r.client
+ scheduleUUID := data.Id.ValueString()
+ tflog.Debug(ctx, fmt.Sprintf("TTRT HypercoreSnapshotSchedule Read oldState scheduleUUID=%s\n", scheduleUUID))
+
+ pHc3Schedule := utils.GetVMSnapshotScheduleByUUID(restClient, scheduleUUID)
+ if pHc3Schedule == nil {
+ resp.Diagnostics.AddError("Schedule not found", fmt.Sprintf("Schedule not found - scheduleUUID=%s", scheduleUUID))
+ return
+ }
+ hc3Schedule := *pHc3Schedule
+
+ var scheduleRules []RulesModel
+ var ruleValues []attr.Value
+ var diags diag.Diagnostics
+ if hc3Schedule["rrules"] != nil {
+ hc3Rules := utils.AnyToListOfMap(hc3Schedule["rrules"])
+ scheduleRules = make([]RulesModel, len(hc3Rules))
+ for i, hc3Rule := range hc3Rules {
+ scheduleRules[i].Name = types.StringValue(utils.AnyToString(hc3Rule["name"]))
+ scheduleRules[i].Frequency = types.StringValue(utils.AnyToString(hc3Rule["rrule"]))
+ scheduleRules[i].StartTimestamp = types.StringValue(utils.AnyToString(hc3Rule["dtstart"]))
+ scheduleRules[i].LocalRetentionSeconds = types.Int64Value(utils.AnyToInteger64(hc3Rule["localRetentionDurationSeconds"]))
+ scheduleRules[i].RemoteRetentionSeconds = types.Int64Value(utils.AnyToInteger64(hc3Rule["remoteRetentionDurationSeconds"]))
+ }
+ ruleValues, diags = GetRulesAttrValues(scheduleRules)
+ if diags != nil {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ } else {
+ ruleValues = []attr.Value{} // make it an empty list
+ }
+
+ scheduleName := utils.AnyToString(hc3Schedule["name"])
+ tflog.Info(ctx, fmt.Sprintf("TTRT HypercoreSnapshot: schedule_uuid=%s, name=%s, rules=%v\n", scheduleUUID, scheduleName, scheduleRules))
+
+ // ====== Save into the Terraform state ======
+ // Save schedule UUID
+ data.Id = types.StringValue(scheduleUUID)
+
+ // Save schedule name
+ data.Name = types.StringValue(scheduleName)
+
+ // Save schedule rules
+ data.Rules, diags = types.ListValue(
+ types.ObjectType{AttrTypes: rulesModelAttrType},
+ ruleValues,
+ )
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Save updated data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource UPDATE")
+ var data_state HypercoreVMSnapshotScheduleResourceModel
+ resp.Diagnostics.Append(req.State.Get(ctx, &data_state)...)
+ var data HypercoreVMSnapshotScheduleResourceModel
+
+ // Read Terraform plan data into the model
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ restClient := *r.client
+ scheduleUUID := data.Id.ValueString()
+ scheduleName := data.Name.ValueString()
+
+ var scheduleRules []RulesModel
+ if len(data.Rules.Elements()) != 0 {
+ scheduleRules = make([]RulesModel, len(data.Rules.Elements()))
+ diags := data.Rules.ElementsAs(ctx, &scheduleRules, false)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ }
+
+ var dataStateScheduleRules []RulesModel
+ if len(data_state.Rules.Elements()) != 0 {
+ dataStateScheduleRules = make([]RulesModel, len(data_state.Rules.Elements()))
+ diags := data_state.Rules.ElementsAs(ctx, &dataStateScheduleRules, false)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ }
+ tflog.Debug(
+ ctx, fmt.Sprintf(
+ "TTRT HypercoreVMSnapshotSchedule Update schedule_uuid=%s REQUESTED schedule_name=%s, rules=%v\n",
+ scheduleUUID, scheduleName, scheduleRules),
+ )
+ tflog.Debug(ctx, fmt.Sprintf(
+ "TTRT HypercoreVMSnapshotSchedule Update schedule_uuid=%s STATE schedule_name=%s, rules=%v\n",
+ scheduleUUID, data_state.Name.ValueString(), dataStateScheduleRules),
+ )
+
+ var payloadScheduleRules []map[string]any
+ if len(scheduleRules) != 0 {
+ for _, scheduleRule := range scheduleRules {
+ payloadScheduleRules = append(payloadScheduleRules, map[string]any{
+ "dtstart": scheduleRule.StartTimestamp.ValueString(),
+ "rrule": scheduleRule.Frequency.ValueString(),
+ "name": scheduleRule.Name.ValueString(),
+ "localRetentionDurationSeconds": scheduleRule.LocalRetentionSeconds.ValueInt64(),
+ "remoteRetentionDurationSeconds": scheduleRule.RemoteRetentionSeconds.ValueInt64(),
+ })
+ }
+ } else {
+ payloadScheduleRules = []map[string]any{} // empty list
+ }
+
+ tflog.Info(ctx, fmt.Sprintf("TTRT Update: scheduleRules = %v", scheduleRules))
+
+ // Update schedule
+ payload := map[string]any{
+ "name": scheduleName,
+ "rrules": payloadScheduleRules,
+ }
+ _diag := utils.UpdateVMSnapshotSchedule(restClient, scheduleUUID, payload, ctx)
+ if _diag != nil {
+ resp.Diagnostics.AddWarning(_diag.Summary(), _diag.Detail())
+ }
+
+ // TODO: Check if HC3 matches TF
+
+ // Retrieve rules data (it could be inconsistent)
+ hc3Schedule := utils.GetVMSnapshotScheduleByUUID(restClient, scheduleUUID)
+ var ruleValues []attr.Value
+ var diags diag.Diagnostics
+ if (*hc3Schedule)["rrules"] != nil {
+ hc3Rules := utils.AnyToListOfMap((*hc3Schedule)["rrules"])
+ for i := range hc3Rules {
+ if scheduleRules[i].RemoteRetentionSeconds.IsUnknown() {
+ scheduleRules[i].RemoteRetentionSeconds = types.Int64Value(0)
+ }
+ }
+
+ ruleValues, diags = GetRulesAttrValues(scheduleRules)
+ if diags != nil {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ } else {
+ ruleValues = []attr.Value{} // make it an empty list
+ }
+
+ data.Rules, diags = types.ListValue(
+ types.ObjectType{AttrTypes: rulesModelAttrType},
+ ruleValues,
+ )
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ tflog.Info(ctx, fmt.Sprintf("TTRT HypercoreVMSnapshotSchedule: schedule_uuid=%s, name=%s, rules=%v", scheduleUUID, scheduleName, scheduleRules))
+
+ // Save updated data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource DELETE")
+ var data HypercoreVMSnapshotScheduleResourceModel
+
+ // Read Terraform prior state data into the model
+ resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // If applicable, this is a great opportunity to initialize any necessary
+ // provider client data and make a call using it.
+ // httpResp, err := r.client.Do(httpReq)
+ // if err != nil {
+ // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete example, got error: %s", err))
+ // return
+ // }
+
+ restClient := *r.client
+ scheduleUUID := data.Id.ValueString()
+ taskTag := restClient.DeleteRecord(
+ fmt.Sprintf("/rest/v1/VirDomainSnapshotSchedule/%s", scheduleUUID),
+ -1,
+ ctx,
+ )
+ taskTag.WaitTask(restClient, ctx)
+}
+
+func (r *HypercoreVMSnapshotScheduleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ // NOTE: Do we need import state or would it be better to have a data source instead?
+
+ tflog.Info(ctx, "TTRT HypercoreVMSnapshotScheduleResource IMPORT_STATE")
+
+ scheduleUUID := req.ID
+ tflog.Info(ctx, fmt.Sprintf("TTRT HypercoreVMSnapshotScheduleResource: scheduleUUID=%s", scheduleUUID))
+
+ restClient := *r.client
+ hc3Schedule := utils.GetVMSnapshotScheduleByUUID(restClient, scheduleUUID)
+
+ if hc3Schedule == nil {
+ msg := fmt.Sprintf("VM Schedule import, schedule not found - 'schedule_uuid'='%s'.", req.ID)
+ resp.Diagnostics.AddError("VM Schedule import error, schedule not found", msg)
+ return
+ }
+
+ scheduleName := utils.AnyToString((*hc3Schedule)["name"])
+ tflog.Info(ctx, fmt.Sprintf("TTRT Import: schedule=%v", *hc3Schedule))
+
+ var scheduleRules []map[string]any
+ if (*hc3Schedule)["rrule"] != nil {
+ hc3Rules := utils.AnyToListOfMap((*hc3Schedule)["rrule"])
+ for _, hc3Rule := range hc3Rules {
+ scheduleRules = append(scheduleRules, map[string]any{
+ "name": hc3Rule["name"],
+ "start_timestamp": hc3Rule["dtstart"],
+ "frequency": hc3Rule["rrule"],
+ "local_retention_seconds": hc3Rule["localRetentionDurationSeconds"],
+ "remote_retention_seconds": hc3Rule["remoteRetentionDurationSeconds"],
+ })
+ }
+ }
+
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), scheduleUUID)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), scheduleName)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("rules"), scheduleRules)...)
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 5292e97..c878f43 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -171,6 +171,8 @@ func (p *HypercoreProvider) Resources(ctx context.Context) []func() resource.Res
NewHypercoreISOResource,
NewHypercoreVMPowerStateResource,
NewHypercoreVMBootOrderResource,
+ NewHypercoreVMSnapshotResource,
+ NewHypercoreVMSnapshotScheduleResource,
}
}
diff --git a/internal/utils/vm.go b/internal/utils/vm.go
index 879b6bc..7141224 100644
--- a/internal/utils/vm.go
+++ b/internal/utils/vm.go
@@ -56,19 +56,20 @@ const (
)
type VM struct {
- UUID string
- VMName string
- sourceVMUUID string
- cloudInit map[string]any
- preserveMacAddress bool
- description *string
- tags *[]string
- vcpu *int32
- memory *int64
- powerState *string
- strictAffinity bool
- preferredNodeUUID string
- backupNodeUUID string
+ UUID string
+ VMName string
+ sourceVMUUID string
+ cloudInit map[string]any
+ preserveMacAddress bool
+ description *string
+ tags *[]string
+ vcpu *int32
+ memory *int64
+ snapshotScheduleUUID *string
+ powerState *string
+ strictAffinity bool
+ preferredNodeUUID string
+ backupNodeUUID string
_wasNiceShutdownTried bool
_didNiceShutdownWork bool
@@ -87,6 +88,7 @@ func NewVM(
_tags *[]string,
_vcpu *int32,
_memory *int64,
+ _snapshotScheduleUUID *string,
_powerState *string,
_strictAffinity bool,
_preferredNodeUUID string,
@@ -104,14 +106,15 @@ func NewVM(
"userData": userDataB64,
"metaData": metaDataB64,
},
- description: _description,
- tags: _tags,
- vcpu: _vcpu,
- memory: _memory,
- powerState: _powerState,
- strictAffinity: _strictAffinity,
- preferredNodeUUID: _preferredNodeUUID,
- backupNodeUUID: _backupNodeUUID,
+ description: _description,
+ tags: _tags,
+ vcpu: _vcpu,
+ memory: _memory,
+ snapshotScheduleUUID: _snapshotScheduleUUID,
+ powerState: _powerState,
+ strictAffinity: _strictAffinity,
+ preferredNodeUUID: _preferredNodeUUID,
+ backupNodeUUID: _backupNodeUUID,
// helpers
_wasNiceShutdownTried: false,
@@ -412,6 +415,9 @@ func (vc *VM) BuildUpdatePayload(changedParams map[string]bool) map[string]any {
if changed, ok := changedParams["vcpu"]; ok && changed {
updatePayload["numVCPU"] = *vc.vcpu
}
+ if changed, ok := changedParams["snapshotScheduleUUID"]; ok && changed {
+ updatePayload["snapshotScheduleUUID"] = *vc.snapshotScheduleUUID
+ }
affinityStrategy := map[string]any{}
if changed, ok := changedParams["strictAffinity"]; ok && changed {
@@ -455,6 +461,9 @@ func (vc *VM) GetChangedParams(ctx context.Context, vmFromClient map[string]any)
changedParams["powerState"] = desiredPowerState != vmFromClient["state"]
}
}
+ if vc.snapshotScheduleUUID != nil {
+ changedParams["snapshotScheduleUUID"] = *vc.snapshotScheduleUUID != vmFromClient["snapshotScheduleUUID"]
+ }
hc3AffinityStrategy := AnyToMap(vmFromClient["affinityStrategy"])
changedParams["strictAffinity"] = vc.strictAffinity != hc3AffinityStrategy["strictAffinity"]
diff --git a/internal/utils/vm_snapshot.go b/internal/utils/vm_snapshot.go
new file mode 100644
index 0000000..ae5d133
--- /dev/null
+++ b/internal/utils/vm_snapshot.go
@@ -0,0 +1,149 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package utils
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func GetVMSnapshotScheduleByUUID(
+ restClient RestClient,
+ scheduleUUID string,
+) *map[string]any {
+ schedule := restClient.GetRecord(
+ fmt.Sprintf("/rest/v1/VirDomainSnapshotSchedule/%s", scheduleUUID),
+ nil,
+ false,
+ -1,
+ )
+
+ return schedule
+}
+
+func GetVMSnapshotByUUID(
+ restClient RestClient,
+ snapUUID string,
+) *map[string]any {
+ snapshot := restClient.GetRecord(
+ fmt.Sprintf("/rest/v1/VirDomainSnapshot/%s", snapUUID),
+ nil,
+ false,
+ -1,
+ )
+
+ return snapshot
+}
+
+func CreateVMSnapshot(
+ restClient RestClient,
+ vmUUID string,
+ payload map[string]any,
+ ctx context.Context,
+) (string, map[string]any, diag.Diagnostic) {
+
+ taskTag, _, err := restClient.CreateRecord(
+ "/rest/v1/VirDomainSnapshot",
+ payload,
+ -1,
+ )
+
+ if err != nil {
+ return "", nil, diag.NewWarningDiagnostic(
+ "HC3 is receiving too many requests at the same time.",
+ fmt.Sprintf("Please retry apply after Terraform finishes it's current operation. HC3 response message: %v", err.Error()),
+ )
+ }
+
+ taskTag.WaitTask(restClient, ctx)
+ snapUUID := taskTag.CreatedUUID
+ snapshot := GetVMSnapshotByUUID(restClient, snapUUID)
+
+ return snapUUID, *snapshot, nil
+}
+
+func CreateVMSnapshotSchedule(
+ restClient RestClient,
+ payload map[string]any,
+ ctx context.Context,
+) (string, map[string]any, diag.Diagnostic) {
+
+ taskTag, status, err := restClient.CreateRecord(
+ "/rest/v1/VirDomainSnapshotSchedule",
+ payload,
+ -1,
+ )
+
+ tflog.Debug(ctx, fmt.Sprintf("TTRT Snapshot Create Status: %d\n", status))
+
+ if err != nil {
+ return "", nil, diag.NewWarningDiagnostic(
+ "HC3 is receiving too many requests at the same time.",
+ fmt.Sprintf("Please retry apply after Terraform finishes it's current operation. HC3 response message: %v", err.Error()),
+ )
+ }
+
+ taskTag.WaitTask(restClient, ctx)
+ scheduleUUID := taskTag.CreatedUUID
+ schedule := GetVMSnapshotScheduleByUUID(restClient, scheduleUUID)
+
+ return scheduleUUID, *schedule, nil
+}
+
+func UpdateVMSnapshotSchedule(
+ restClient RestClient,
+ scheduleUUID string,
+ payload map[string]any,
+ ctx context.Context,
+) diag.Diagnostic {
+
+ taskTag, err := restClient.UpdateRecord(
+ fmt.Sprintf("/rest/v1/VirDomainSnapshotSchedule/%s", scheduleUUID),
+ payload,
+ -1,
+ ctx,
+ )
+
+ if err != nil {
+ return diag.NewWarningDiagnostic(
+ "HC3 is receiving too many requests at the same time.",
+ fmt.Sprintf("Please retry apply after Terraform finishes it's current operation. HC3 response message: %v", err.Error()),
+ )
+ }
+
+ taskTag.WaitTask(restClient, ctx)
+
+ return nil
+}
+
+func RemoveVMSnapshotSchedule(
+ restClient RestClient,
+ vmUUID string,
+ ctx context.Context,
+) diag.Diagnostic {
+ payload := map[string]any{
+ "snapshotScheduleUUID": "",
+ }
+
+ taskTag, err := restClient.UpdateRecord(
+ fmt.Sprintf("/rest/v1/VirDomain/%s", vmUUID),
+ payload,
+ -1,
+ ctx,
+ )
+
+ if err != nil {
+ return diag.NewWarningDiagnostic(
+ "HC3 is receiving too many requests at the same time.",
+ fmt.Sprintf("Please retry apply after Terraform finishes it's current operation. HC3 response message: %v", err.Error()),
+ )
+ }
+
+ taskTag.WaitTask(restClient, ctx)
+
+ return nil
+}
diff --git a/local/main.tf b/local/main.tf
index e1af49c..12058ac 100644
--- a/local/main.tf
+++ b/local/main.tf
@@ -12,46 +12,88 @@ terraform {
provider "hypercore" {}
locals {
- vm_name = "testtf-disk-justin"
- empty_vm = "testtf-ana"
- clone_empty_vm = "testtf-clone-ana"
-
- vm_meta_data_tmpl = "./assets/meta-data.ubuntu-22.04.yml.tftpl"
- vm_user_data_tmpl = "./assets/user-data.ubuntu-22.04.yml.tftpl"
+ vm_name = "testtf-ana"
+ another_vm_name = "testtf-ana-3"
+ create_vm_name = "testtf-ana-scheduled"
}
-resource "hypercore_vm" "myvm" {
+
+data "hypercore_vm" "snapvm" {
name = local.vm_name
+}
+
+output "testtf-ana" {
+ value = data.hypercore_vm.snapvm.vms.0.snapshot_schedule_uuid
+}
+
+data "hypercore_vm" "another_snapvm_schedule" {
+ name = local.another_vm_name
+}
+
+resource "hypercore_vm_snapshot" "snapshot" {
+ vm_uuid = data.hypercore_vm.snapvm.vms.0.uuid
+ label = "testtf-ana-snapshot"
+}
+
+resource "hypercore_vm_snapshot" "imported-snapshot" {
+ vm_uuid = data.hypercore_vm.snapvm.vms.0.uuid
+}
+
+import {
+ to = hypercore_vm_snapshot.imported-snapshot
+ id = "24ab2255-ca77-49ec-bc96-f469cec3affb"
+}
+
+resource "hypercore_vm_snapshot_schedule" "testtf-schedule" {
+ name = "testtf-schedule-2"
+ rules = [
+ {
+ name = "testtf-rule-1",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ },
+ {
+ name = "testtf-rule-2",
+ start_timestamp = "2023-02-01 00:00:00",
+ frequency = "FREQ=MINUTELY;INTERVAL=1",
+ local_retention_seconds = 300
+ }
+ ]
+}
+
+resource "hypercore_vm" "testtf-ana-scheduled" {
+ group = "testtfxlab"
+ name = local.create_vm_name
+ description = "Testing terraform resources"
+ vcpu = 4
+ memory = 4096 # MiB
+ snapshot_schedule_uuid = hypercore_vm_snapshot_schedule.testtf-schedule.id
+
clone = {
- source_vm_uuid = ""
meta_data = ""
+ source_vm_uuid = ""
user_data = ""
}
- affinity_strategy = {
- strict_affinity = true
- preferred_node_uuid = data.hypercore_node.cluster0_peer1.nodes.0.uuid
- backup_node_uuid = data.hypercore_node.cluster0_peer1.nodes.0.uuid
- }
-}
-data "hypercore_node" "cluster0_all" {
+ depends_on = [
+ hypercore_vm_snapshot_schedule.testtf-schedule # make sure the schedule was created first
+ ]
}
-data "hypercore_node" "cluster0_peer1" {
- peer_id = 1
+output "testtf-ana-scheduled" {
+ value = hypercore_vm.testtf-ana-scheduled.snapshot_schedule_uuid
}
-output "myvm" {
- value = hypercore_vm.myvm
+resource "hypercore_vm_snapshot_schedule" "testtf-schedule-no-rules" {
+ name = "testtf-schedule-no-rules-3"
}
-output "cluster_0_peer_1_uuid" {
- value = data.hypercore_node.cluster0_peer1.nodes.0.uuid
+resource "hypercore_vm_snapshot_schedule" "testtf-schedule-imported" {
+ name = "testtf-existing-schedule"
}
-data "hypercore_vm" "demo" {
- name = "demo-vm"
-}
-output "vm_demo" {
- value = data.hypercore_vm.demo
+import {
+ to = hypercore_vm_snapshot_schedule.testtf-schedule-imported
+ id = "69b21f14-6bb6-4dd5-a6bc-6dec9bd59c96"
}