From 4b29e7ae2c20b395991fe363adcb4da3ce308431 Mon Sep 17 00:00:00 2001 From: Jaromir Hradilek Date: Mon, 14 Jul 2025 20:15:42 +0200 Subject: [PATCH] CNV-62649: Updated CNV modules to pass DITA validation. --- modules/virt-about-aaq-operator.adoc | 6 +- modules/virt-about-cloning.adoc | 4 +- modules/virt-about-instance-types.adoc | 2 - modules/virt-about-ksm.adoc | 6 +- modules/virt-about-scratch-space.adoc | 2 +- ...irt-about-static-and-dynamic-ssh-keys.adoc | 6 +- ...irt-about-storage-pools-pvc-templates.adoc | 1 - modules/virt-about-vm-snapshots.adoc | 3 +- modules/virt-about-workload-updates.adoc | 1 - .../virt-accessing-exported-vm-manifests.adoc | 6 +- ...cessing-node-exporter-outside-cluster.adoc | 12 ++-- modules/virt-accessing-rdp-console.adoc | 6 +- modules/virt-add-boot-order-web.adoc | 2 +- modules/virt-add-disk-to-vm.adoc | 2 +- modules/virt-adding-a-boot-source-web.adoc | 2 + modules/virt-adding-public-key-vm-cli.adoc | 6 +- modules/virt-adding-vm-to-service-mesh.adoc | 3 +- ...dditional-scc-for-kubevirt-controller.adoc | 9 ++- ...zing-datavolume-conditions-and-events.adoc | 3 - ...-assigning-pci-device-virtual-machine.adoc | 6 +- modules/virt-assigning-vgpu-vm-cli.adoc | 5 +- ...virt-attaching-virtio-disk-to-windows.adoc | 2 + modules/virt-attaching-vm-to-primary-udn.adoc | 3 +- .../virt-attaching-vm-to-secondary-udn.adoc | 3 +- .../virt-automatic-certificates-renewal.adoc | 2 - .../virt-autoupdate-custom-bootsource.adoc | 1 - modules/virt-binding-devices-vfio-driver.adoc | 20 ++++--- modules/virt-booting-vms-uefi-mode.adoc | 3 +- .../virt-cdi-supported-operations-matrix.adoc | 60 ++++++++++++------- .../virt-checking-cluster-dpdk-readiness.adoc | 14 ++--- .../virt-checking-storage-configuration.adoc | 11 ++-- modules/virt-cloning-a-datavolume.adoc | 6 +- modules/virt-cloning-pvc-to-dv-cli.adoc | 6 +- .../virt-cluster-resource-requirements.adoc | 43 +++++++------ .../virt-configure-multiple-iothreads.adoc | 9 +-- modules/virt-configuring-cluster-dpdk.adoc | 9 +-- ...iguring-cluster-eviction-strategy-cli.adoc | 1 - .../virt-configuring-cluster-real-time.adoc | 8 +-- .../virt-configuring-downward-metrics.adoc | 1 - ...virt-configuring-interface-link-state.adoc | 5 +- ...virt-configuring-live-migration-heavy.adoc | 9 +-- ...irt-configuring-live-migration-limits.adoc | 5 +- modules/virt-configuring-runstrategy-vm.adoc | 3 +- ...virt-configuring-secondary-dns-server.adoc | 6 +- ...g-secondary-network-vm-live-migration.adoc | 4 +- modules/virt-configuring-vm-dpdk.adoc | 3 +- ...-configuring-vm-eviction-strategy-cli.adoc | 3 +- modules/virt-configuring-vm-project-dpdk.adoc | 3 +- modules/virt-configuring-vm-real-time.adoc | 11 ++-- .../virt-configuring-vm-use-usb-device.adoc | 1 - ...iguring-vm-with-node-exporter-service.adoc | 3 +- ...virt-connecting-secondary-network-ssh.adoc | 6 +- modules/virt-connecting-vm-internal-fqdn.adoc | 3 +- ...-connecting-vm-secondarynw-using-fqdn.adoc | 3 +- .../virt-creating-a-primary-cluster-udn.adoc | 3 +- modules/virt-creating-a-primary-udn.adoc | 3 +- ...reating-and-exposing-mediated-devices.adoc | 11 ++-- .../virt-creating-secondary-localnet-udn.adoc | 6 +- ...virt-creating-secondary-udn-namespace.adoc | 1 - modules/virt-creating-service-virtctl.adoc | 4 +- .../virt-creating-virtualmachineexport.adoc | 4 +- modules/virt-creating-vm-cli.adoc | 1 - .../virt-creating-vm-container-disk-cli.adoc | 4 +- modules/virt-creating-vm-instancetype.adoc | 4 +- modules/virt-creating-vm-snapshot-cli.adoc | 8 ++- modules/virt-creating-vm-web-page-cli.adoc | 6 +- 
...rage-profile-default-cloning-strategy.adoc | 4 +- modules/virt-customizing-storage-profile.adoc | 1 - .../virt-define-guest-agent-ping-probe.adoc | 2 - modules/virt-define-http-liveness-probe.adoc | 4 +- modules/virt-define-http-readiness-probe.adoc | 4 +- modules/virt-define-tcp-readiness-probe.adoc | 4 +- modules/virt-defining-apps-for-dr.adoc | 32 ++++------ modules/virt-delete-vm-web.adoc | 2 +- modules/virt-deleting-virt-cli.adoc | 3 +- modules/virt-deploying-operator-cli.adoc | 1 - modules/virt-disabling-tls-for-registry.adoc | 3 +- .../virt-discovering-vm-internal-fqdn.adoc | 3 +- modules/virt-dv-annotations.adoc | 2 + modules/virt-edit-boot-order-web.adoc | 2 +- modules/virt-editing-vm-yaml-web.adoc | 1 + .../virt-enabling-descheduler-evictions.adoc | 3 +- ...rt-enabling-dynamic-key-injection-cli.adoc | 6 +- .../virt-enabling-usb-host-passthrough.adoc | 2 - ...-enabling-volume-snapshot-boot-source.adoc | 3 +- modules/virt-example-bond-nncp.adoc | 6 +- .../virt-example-nmstate-IP-management.adoc | 12 ++-- ...ample-vm-node-placement-node-affinity.adoc | 2 + ...ample-vm-node-placement-node-selector.adoc | 2 + ...xample-vm-node-placement-pod-affinity.adoc | 2 + ...example-vm-node-placement-tolerations.adoc | 2 + ...t-expanding-storage-with-data-volumes.adoc | 1 - ...rt-exposing-pci-device-in-cluster-cli.adoc | 4 +- modules/virt-generalizing-linux-vm-image.adoc | 8 ++- .../virt-generalizing-windows-sysprep.adoc | 2 + modules/virt-golden-images-namespace-cli.adoc | 5 +- ...plugging-bridge-network-interface-cli.adoc | 13 ++-- modules/virt-hot-plugging-memory.adoc | 4 +- ...plugging-bridge-network-interface-cli.adoc | 5 +- modules/virt-initiating-vm-migration-cli.adoc | 3 +- modules/virt-latency-checkup-web-console.adoc | 4 +- modules/virt-loki-log-queries.adoc | 6 +- ...easuring-latency-vm-secondary-network.adoc | 14 ++--- modules/virt-metro-dr-odf.adoc | 5 +- modules/virt-monitoring-upgrade-status.adoc | 2 - modules/virt-node-network-config-console.adoc | 3 +- ...at-scale-in-openshift-data-foundation.adoc | 8 ++- modules/virt-options-configuring-mdevs.adoc | 1 - ...-gpu-operands-from-deploying-on-nodes.adoc | 8 +-- ...ates-during-control-plane-only-update.adoc | 21 ++++--- .../virt-pxe-booting-with-mac-address.adoc | 6 +- modules/virt-querying-metrics.adoc | 39 +++++++----- ...the-node-exporter-service-for-metrics.adoc | 3 +- modules/virt-regional-dr-odf.adoc | 5 +- modules/virt-remove-boot-order-item-web.adoc | 2 +- ...ving-mediated-device-from-cluster-cli.adoc | 1 - ...-removing-pci-device-from-cluster-cli.adoc | 6 +- .../virt-removing-vm-delete-protection.adoc | 2 - .../virt-restoring-vm-from-snapshot-cli.adoc | 3 +- modules/virt-running-real-time-checkup.adoc | 14 ++--- .../virt-setting-cpu-allocation-ratio.adoc | 2 - .../virt-specializing-windows-sysprep.adoc | 2 + modules/virt-starting-vm-web.adoc | 2 +- modules/virt-storage-checkup-web-console.adoc | 4 +- modules/virt-storage-wizard-fields-web.adoc | 1 - modules/virt-temporary-token-VNC.adoc | 2 +- ...ubleshooting-cert-rotation-parameters.adoc | 3 +- ...oubleshooting-incorrect-policy-config.adoc | 18 ++++-- .../virt-update-node-network-config-form.adoc | 2 +- modules/virt-updating-multiple-vms.adoc | 1 - modules/virt-using-virtctl-ssh-command.adoc | 5 +- ...-configure-higher-vm-workload-density.adoc | 13 ++-- .../virt-verify-status-bootsource-update.adoc | 4 +- ...utomatically-created-storage-profiles.adoc | 3 +- .../virt-viewing-downward-metrics-tool.adoc | 5 +- modules/virt-viewing-logs-cli.adoc | 10 ++-- 
.../virt-viewing-network-state-of-node.adoc | 3 +- modules/virt-viewing-vmi-ip-cli.adoc | 3 +- modules/virt-vm-behavior-dr.adoc | 18 +++--- modules/virt-vm-custom-scheduler.adoc | 6 +- modules/virt-vmware-comparison.adoc | 22 ++++--- modules/virt-wasp-agent-pod-eviction.adoc | 8 +-- 142 files changed, 451 insertions(+), 410 deletions(-) diff --git a/modules/virt-about-aaq-operator.adoc b/modules/virt-about-aaq-operator.adoc index 2c2eb4f74b58..0931a56098ab 100644 --- a/modules/virt-about-aaq-operator.adoc +++ b/modules/virt-about-aaq-operator.adoc @@ -21,7 +21,8 @@ The AAQ Operator introduces two new API objects defined as custom resource defin * `ApplicationAwareResourceQuota`: Sets aggregate quota restrictions enforced per namespace. The `ApplicationAwareResourceQuota` API is compatible with the native `ResourceQuota` object and shares the same specification and status definitions. + -.Example manifest +Example manifest: ++ [source,yaml] ---- apiVersion: aaq.kubevirt.io/v1alpha1 @@ -41,7 +42,8 @@ spec: * `ApplicationAwareClusterResourceQuota`: Mirrors the `ApplicationAwareResourceQuota` object at a cluster scope. It is compatible with the native `ClusterResourceQuota` API object and shares the same specification and status definitions. When creating an AAQ cluster quota, you can select multiple namespaces based on annotation selection, label selection, or both by editing the `spec.selector.labels` or `spec.selector.annotations` fields. + -.Example manifest +Example manifest: ++ [source,yaml] ---- apiVersion: aaq.kubevirt.io/v1alpha1 diff --git a/modules/virt-about-cloning.adoc b/modules/virt-about-cloning.adoc index d66786549aa4..306eb8d188ed 100644 --- a/modules/virt-about-cloning.adoc +++ b/modules/virt-about-cloning.adoc @@ -47,7 +47,7 @@ When the requirements for neither Container Storage Interface (CSI) volume cloni Host-assisted cloning uses a source pod and a target pod to copy data from the source volume to the target volume. The target persistent volume claim (PVC) is annotated with the fallback reason that explains why host-assisted cloning has been used, and an event is created. -.Example PVC target annotation +Example PVC target annotation: [source,yaml] ---- @@ -60,7 +60,7 @@ metadata: cdi.kubevirt.io/cloneType: copy ---- -.Example event +Example event: [source,terminal] ---- diff --git a/modules/virt-about-instance-types.adoc b/modules/virt-about-instance-types.adoc index 8f4f02d253c8..423e8206b4c4 100644 --- a/modules/virt-about-instance-types.adoc +++ b/modules/virt-about-instance-types.adoc @@ -31,7 +31,6 @@ Because instance types require defined CPU and memory attributes, {VirtProductNa You can manually create an instance type manifest. For example: -.Example YAML file with required fields [source,yaml] ---- apiVersion: instancetype.kubevirt.io/v1beta1 @@ -49,7 +48,6 @@ spec: You can create an instance type manifest by using the `virtctl` CLI utility. For example: -.Example `virtctl` command with required fields [source,terminal] ---- $ virtctl create instancetype --cpu 2 --memory 256Mi diff --git a/modules/virt-about-ksm.adoc b/modules/virt-about-ksm.adoc index c89201f5e3fb..ad15ddd3bb13 100644 --- a/modules/virt-about-ksm.adoc +++ b/modules/virt-about-ksm.adoc @@ -14,11 +14,7 @@ You can configure {VirtProductName} to activate kernel samepage merging (KSM) wh You can enable or disable the KSM activation feature for all nodes by using the {product-title} web console or by editing the `HyperConverged` custom resource (CR). 
The `HyperConverged` CR supports more granular configuration. -[discrete] -[id="virt-ksm-cr-configuration"] -=== CR configuration - -You can configure the KSM activation feature by editing the `spec.configuration.ksmConfiguration` stanza of the `HyperConverged` CR. +You can configure the KSM activation feature by editing the `spec.configuration.ksmConfiguration` stanza of the `HyperConverged` CR: * You enable the feature and configure settings by editing the `ksmConfiguration` stanza. diff --git a/modules/virt-about-scratch-space.adoc b/modules/virt-about-scratch-space.adoc index 088806c7ed30..cef757acc3e1 100644 --- a/modules/virt-about-scratch-space.adoc +++ b/modules/virt-about-scratch-space.adoc @@ -21,7 +21,7 @@ CDI requires requesting scratch space with a `file` volume mode, regardless of t If the origin PVC is backed by `block` volume mode, you must define a storage class capable of provisioning `file` volume mode PVCs. ==== -[discrete] +[id="scratch-space-manual-provisioning_{context}"] == Manual provisioning If there are no storage classes, CDI uses any PVCs in the project that match the size requirements for the image. diff --git a/modules/virt-about-static-and-dynamic-ssh-keys.adoc b/modules/virt-about-static-and-dynamic-ssh-keys.adoc index 64ef9a81f4c6..9dcbb32cdb26 100644 --- a/modules/virt-about-static-and-dynamic-ssh-keys.adoc +++ b/modules/virt-about-static-and-dynamic-ssh-keys.adoc @@ -13,7 +13,6 @@ You can add public SSH keys to virtual machines (VMs) statically at first boot o Only {op-system-base-full} 9 supports dynamic key injection. ==== -[discrete] [id="static-key-management_{context}"] == Static SSH key management @@ -24,11 +23,10 @@ You can add the key by using one of the following methods: * Add a key to a single VM when you create it by using the web console or the command line. * Add a key to a project by using the web console. Afterwards, the key is automatically added to the VMs that you create in this project. -.Use cases +Use cases: * As a VM owner, you can provision all your newly created VMs with a single key. -[discrete] [id="dynamic-key-management_{context}"] == Dynamic SSH key management @@ -36,7 +34,7 @@ You can enable dynamic SSH key management for a VM with {op-system-base-full} 9 When dynamic key management is disabled, the default key management setting of a VM is determined by the image used for the VM. -.Use cases +Use cases: * Granting or revoking access to VMs: As a cluster administrator, you can grant or revoke remote VM access by adding or removing the keys of individual users from a `Secret` object that is applied to all VMs in a namespace. * User access: You can add your access credentials to all VMs that you create and manage. diff --git a/modules/virt-about-storage-pools-pvc-templates.adoc b/modules/virt-about-storage-pools-pvc-templates.adoc index 05640db9535f..47e9db39f367 100644 --- a/modules/virt-about-storage-pools-pvc-templates.adoc +++ b/modules/virt-about-storage-pools-pvc-templates.adoc @@ -12,7 +12,6 @@ A storage pool created with a PVC template can contain multiple HPP volumes. 
Spl The PVC template is based on the `spec` stanza of the `PersistentVolumeClaim` object: -.Example `PersistentVolumeClaim` object [source,yaml] ---- apiVersion: v1 diff --git a/modules/virt-about-vm-snapshots.adoc b/modules/virt-about-vm-snapshots.adoc index 12ac47075686..3d3ab25f8487 100644 --- a/modules/virt-about-vm-snapshots.adoc +++ b/modules/virt-about-vm-snapshots.adoc @@ -29,7 +29,8 @@ Cloning a VM with a vTPM device attached to it or creating a new VM from its sna * Restore a VM from a snapshot * Delete an existing VM snapshot -.VM snapshot controller and custom resources +[id="vm-snapshot-controller-and-custom-resources_{context}"] +== VM snapshot controller and custom resources The VM snapshot feature introduces three new API objects defined as custom resource definitions (CRDs) for managing snapshots: diff --git a/modules/virt-about-workload-updates.adoc b/modules/virt-about-workload-updates.adoc index 470b3abd8fa9..531e18fb389f 100644 --- a/modules/virt-about-workload-updates.adoc +++ b/modules/virt-about-workload-updates.adoc @@ -33,7 +33,6 @@ If you enable both `LiveMigrate` and `Evict`: * VMIs that do not support live migration use the `Evict` update strategy. If a VMI is controlled by a `VirtualMachine` object that has `runStrategy: Always` set, a new VMI is created in a new pod with updated components. -[discrete] [id="migration-attempts-timeouts_{context}"] == Migration attempts and timeouts diff --git a/modules/virt-accessing-exported-vm-manifests.adoc b/modules/virt-accessing-exported-vm-manifests.adoc index 714dc82a59b2..0ce6f0364e7f 100644 --- a/modules/virt-accessing-exported-vm-manifests.adoc +++ b/modules/virt-accessing-exported-vm-manifests.adoc @@ -51,10 +51,10 @@ $ oc get secret export-token- -o jsonpath={.data.token} | base64 -- $ oc get vmexport -o yaml ---- -. Review the `status.links` stanza, which is divided into `external` and `internal` sections. Note the `manifests.url` fields within each section: +. Review the `status.links` stanza, which is divided into `external` and `internal` sections. Note the `manifests.url` fields within each section. ++ +For example: + -.Example output - [source,yaml] ---- apiVersion: export.kubevirt.io/v1beta1 diff --git a/modules/virt-accessing-node-exporter-outside-cluster.adoc b/modules/virt-accessing-node-exporter-outside-cluster.adoc index ff8370fd4206..6e888968867c 100644 --- a/modules/virt-accessing-node-exporter-outside-cluster.adoc +++ b/modules/virt-accessing-node-exporter-outside-cluster.adoc @@ -15,33 +15,35 @@ You can access the node-exporter service outside the cluster and view the expose .Procedure -. Expose the node-exporter service. +. Expose the node-exporter service: + [source,terminal] ---- $ oc expose service -n ---- -. Obtain the FQDN (Fully Qualified Domain Name) for the route. +. Obtain the FQDN (Fully Qualified Domain Name) for the route: + [source,terminal] ---- $ oc get route -o=custom-columns=NAME:.metadata.name,DNS:.spec.host ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME DNS node-exporter-service node-exporter-service-dynamation.apps.cluster.example.org ---- -. Use the `curl` command to display metrics for the node-exporter service. +. 
Use the `curl` command to display metrics for the node-exporter service: + [source,terminal] ---- $ curl -s http://node-exporter-service-dynamation.apps.cluster.example.org/metrics ---- + -.Example output +Example output: ++ [source,terminal] ---- go_gc_duration_seconds{quantile="0"} 1.5382e-05 diff --git a/modules/virt-accessing-rdp-console.adoc b/modules/virt-accessing-rdp-console.adoc index 82a67e88bb64..e0472d45e969 100644 --- a/modules/virt-accessing-rdp-console.adoc +++ b/modules/virt-accessing-rdp-console.adoc @@ -82,7 +82,8 @@ $ oc create -f .yaml $ oc get service -n example-namespace ---- + -.Example output for `NodePort` service +Example output for `NodePort` service: ++ [source,terminal] ---- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE @@ -96,7 +97,8 @@ rdpservice NodePort 172.30.232.73 3389:30000/TCP 5m $ oc get node -o wide ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP diff --git a/modules/virt-add-boot-order-web.adoc b/modules/virt-add-boot-order-web.adoc index b90a4145d106..0a035914e36e 100644 --- a/modules/virt-add-boot-order-web.adoc +++ b/modules/virt-add-boot-order-web.adoc @@ -24,7 +24,7 @@ Add items to a boot order list by using the web console. . Add any additional disks or NICs to the boot order list. . Click *Save*. - ++ [NOTE] ==== If the virtual machine is running, changes to *Boot Order* will not take effect until you restart the virtual machine. diff --git a/modules/virt-add-disk-to-vm.adoc b/modules/virt-add-disk-to-vm.adoc index a286cb762df5..fe9137d6eb80 100644 --- a/modules/virt-add-disk-to-vm.adoc +++ b/modules/virt-add-disk-to-vm.adoc @@ -23,7 +23,7 @@ You can add a virtual disk to a virtual machine (VM) by using the {product-title .. Optional: You can clear *Apply optimized StorageProfile settings* to change the *Volume Mode* and *Access Mode* for the virtual disk. If you do not specify these parameters, the system uses the default values from the `kubevirt-storage-class-defaults` config map. . Click *Add*. - ++ [NOTE] ==== If the VM is running, you must restart the VM to apply the change. diff --git a/modules/virt-adding-a-boot-source-web.adoc b/modules/virt-adding-a-boot-source-web.adoc index 220f9d81fb68..2dfe5c4113ae 100644 --- a/modules/virt-adding-a-boot-source-web.adoc +++ b/modules/virt-adding-a-boot-source-web.adoc @@ -48,4 +48,6 @@ Provided boot sources are updated automatically to the latest version of the ope .. Click *Save and import* if you imported content from a URL or the registry. .. Click *Save and clone* if you cloned an existing PVC. +.Result + Your custom virtual machine template with a boot source is listed on the *Catalog* page. You can use this template to create a virtual machine. diff --git a/modules/virt-adding-public-key-vm-cli.adoc b/modules/virt-adding-public-key-vm-cli.adoc index bebd28297b70..0ab3f831c8d7 100644 --- a/modules/virt-adding-public-key-vm-cli.adoc +++ b/modules/virt-adding-public-key-vm-cli.adoc @@ -17,9 +17,8 @@ The key is added to the VM as a cloud-init data source. This method separates th .Procedure -. Create a manifest file for a `VirtualMachine` object and a `Secret` object: +. 
Create a manifest file for a `VirtualMachine` object and a `Secret` object, for example: + -.Example manifest [source,yaml] ---- include::snippets/virt-static-key.yaml[] @@ -50,7 +49,8 @@ $ virtctl start vm example-vm -n example-namespace $ oc describe vm example-vm -n example-namespace ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-adding-vm-to-service-mesh.adoc b/modules/virt-adding-vm-to-service-mesh.adoc index 3a4f827b4102..b366f20c7195 100644 --- a/modules/virt-adding-vm-to-service-mesh.adoc +++ b/modules/virt-adding-vm-to-service-mesh.adoc @@ -22,9 +22,8 @@ To avoid port conflicts, do not use ports used by the Istio sidecar proxy. These .Procedure -. Edit the VM configuration file to add the `sidecar.istio.io/inject: "true"` annotation: +. Edit the VM configuration file to add the `sidecar.istio.io/inject: "true"` annotation. For example: + -.Example configuration file [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-additional-scc-for-kubevirt-controller.adoc b/modules/virt-additional-scc-for-kubevirt-controller.adoc index c7a8606fc2b9..81af6cfcfc63 100644 --- a/modules/virt-additional-scc-for-kubevirt-controller.adoc +++ b/modules/virt-additional-scc-for-kubevirt-controller.adoc @@ -14,10 +14,12 @@ The `kubevirt-controller` service account is granted additional SCCs and Linux c The `kubevirt-controller` service account is granted the following SCCs: -* `scc.AllowHostDirVolumePlugin = true` + +* `scc.AllowHostDirVolumePlugin = true` ++ This allows virtual machines to use the hostpath volume plugin. -* `scc.AllowPrivilegedContainer = false` + +* `scc.AllowPrivilegedContainer = false` ++ This ensures the virt-launcher pod is not run as a privileged container. * `scc.AllowedCapabilities = []corev1.Capability{"SYS_NICE", "NET_BIND_SERVICE"}` @@ -25,7 +27,8 @@ This ensures the virt-launcher pod is not run as a privileged container. ** `SYS_NICE` allows setting the CPU affinity. ** `NET_BIND_SERVICE` allows DHCP and Slirp operations. -.Viewing the SCC and RBAC definitions for the kubevirt-controller +[id="viewing-scc-and-rbac-definitions_{context}"] +== Viewing the SCC and RBAC definitions for the kubevirt-controller You can view the `SecurityContextConstraints` definition for the `kubevirt-controller` by using the `oc` tool: diff --git a/modules/virt-analyzing-datavolume-conditions-and-events.adoc b/modules/virt-analyzing-datavolume-conditions-and-events.adoc index 00dce9aaf6f0..5fb059990fef 100644 --- a/modules/virt-analyzing-datavolume-conditions-and-events.adoc +++ b/modules/virt-analyzing-datavolume-conditions-and-events.adoc @@ -30,7 +30,6 @@ The `Message` indicates which PVC owns the data volume. long the PVC has been bound (`Age`) and by what resource (`From`), in this case `datavolume-controller`: + -.Example output [source,terminal] ---- Status: @@ -64,7 +63,6 @@ From this information, you conclude that an import operation was running, creating contention for other operations that are attempting to access the data volume: + -.Example output [source,terminal] ---- Status: @@ -87,7 +85,6 @@ Status: to be used, as in the following example. 
If the data volume is not ready to be used, the `Status` is `False`: + -.Example output [source,terminal] ---- Status: diff --git a/modules/virt-assigning-pci-device-virtual-machine.adoc b/modules/virt-assigning-pci-device-virtual-machine.adoc index 3815f8e410a7..dab5b8a6d4c7 100644 --- a/modules/virt-assigning-pci-device-virtual-machine.adoc +++ b/modules/virt-assigning-pci-device-virtual-machine.adoc @@ -9,9 +9,8 @@ When a PCI device is available in a cluster, you can assign it to a virtual machine and enable PCI passthrough. .Procedure -* Assign the PCI device to a virtual machine as a host device. +* Assign the PCI device to a virtual machine as a host device. For example: + -.Example [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -31,7 +30,8 @@ spec: [source,terminal] $ lspci -nnk | grep NVIDIA + -.Example output +Example output: ++ [source,terminal] ---- $ 02:01.0 3D controller [0302]: NVIDIA Corporation GV100GL [Tesla V100 PCIe 32GB] [10de:1eb8] (rev a1) diff --git a/modules/virt-assigning-vgpu-vm-cli.adoc b/modules/virt-assigning-vgpu-vm-cli.adoc index a0a30dd4a123..044cce7e809c 100644 --- a/modules/virt-assigning-vgpu-vm-cli.adoc +++ b/modules/virt-assigning-vgpu-vm-cli.adoc @@ -15,9 +15,8 @@ Assign mediated devices such as virtual GPUs (vGPUs) to virtual machines (VMs). .Procedure -* Assign the mediated device to a virtual machine (VM) by editing the `spec.domain.devices.gpus` stanza of the `VirtualMachine` manifest: +* Assign the mediated device to a virtual machine (VM) by editing the `spec.domain.devices.gpus` stanza of the `VirtualMachine` manifest. For example: + -.Example virtual machine manifest [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -41,4 +40,4 @@ spec: [source,terminal] ---- $ lspci -nnk | grep ----- \ No newline at end of file +---- diff --git a/modules/virt-attaching-virtio-disk-to-windows.adoc b/modules/virt-attaching-virtio-disk-to-windows.adoc index acb9e300f93b..bea761165926 100644 --- a/modules/virt-attaching-virtio-disk-to-windows.adoc +++ b/modules/virt-attaching-virtio-disk-to-windows.adoc @@ -15,4 +15,6 @@ You must attach the VirtIO container disk to the Windows VM to install the neces . Click the *Customize VirtualMachine parameters*. . Click *Create VirtualMachine*. +.Result + After the VM is created, the `virtio-win` SATA CD disk will be attached to the VM. diff --git a/modules/virt-attaching-vm-to-primary-udn.adoc b/modules/virt-attaching-vm-to-primary-udn.adoc index d9033e202df7..2b0dda2fec2b 100644 --- a/modules/virt-attaching-vm-to-primary-udn.adoc +++ b/modules/virt-attaching-vm-to-primary-udn.adoc @@ -14,7 +14,6 @@ You can connect a virtual machine (VM) to the primary user-defined network (UDN) .Procedure . Edit the `VirtualMachine` manifest to add the UDN interface details, as in the following example: + -.Example `VirtualMachine` manifest [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -47,4 +46,4 @@ spec: [source,terminal] ---- $ oc apply -f .yaml ----- \ No newline at end of file +---- diff --git a/modules/virt-attaching-vm-to-secondary-udn.adoc b/modules/virt-attaching-vm-to-secondary-udn.adoc index 6b35276fda5e..70ba0459cc18 100644 --- a/modules/virt-attaching-vm-to-secondary-udn.adoc +++ b/modules/virt-attaching-vm-to-secondary-udn.adoc @@ -14,7 +14,6 @@ You can connect a virtual machine (VM) to multiple secondary cluster-scoped user .Procedure . 
Edit the `VirtualMachine` manifest to add the CUDN interface details, as in the following example: + -.Example `VirtualMachine` manifest [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -55,4 +54,4 @@ $ oc apply -f .yaml + where: -:: Specifies the name of your `VirtualMachine` manifest YAML file. \ No newline at end of file +:: Specifies the name of your `VirtualMachine` manifest YAML file. diff --git a/modules/virt-automatic-certificates-renewal.adoc b/modules/virt-automatic-certificates-renewal.adoc index ad70465b00a3..9bc3a36e9282 100644 --- a/modules/virt-automatic-certificates-renewal.adoc +++ b/modules/virt-automatic-certificates-renewal.adoc @@ -8,8 +8,6 @@ TLS certificates for {VirtProductName} components are renewed and rotated automatically. You are not required to refresh them manually. -.Automatic renewal schedules - TLS certificates are automatically deleted and replaced according to the following schedule: * KubeVirt certificates are renewed daily. diff --git a/modules/virt-autoupdate-custom-bootsource.adoc b/modules/virt-autoupdate-custom-bootsource.adoc index 7ed99472659b..25721dc7bd50 100644 --- a/modules/virt-autoupdate-custom-bootsource.adoc +++ b/modules/virt-autoupdate-custom-bootsource.adoc @@ -25,7 +25,6 @@ $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} . Edit the `HyperConverged` CR, adding the appropriate template and boot source in the `dataImportCronTemplates` section. For example: + -.Example custom resource [source,yaml] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-binding-devices-vfio-driver.adoc b/modules/virt-binding-devices-vfio-driver.adoc index 02504353e16a..ca6ec16efd2f 100644 --- a/modules/virt-binding-devices-vfio-driver.adoc +++ b/modules/virt-binding-devices-vfio-driver.adoc @@ -5,6 +5,7 @@ :_mod-docs-content-type: PROCEDURE [id="virt-binding-devices-vfio-driver_{context}"] = Binding PCI devices to the VFIO driver + To bind PCI devices to the VFIO (Virtual Function I/O) driver, obtain the values for `vendor-ID` and `device-ID` from each device and create a list with the values. Add this list to the `MachineConfig` object. The `MachineConfig` Operator generates the `/etc/modprobe.d/vfio.conf` on the nodes with the PCI devices, and binds the PCI devices to the VFIO driver. .Prerequisites @@ -12,14 +13,15 @@ To bind PCI devices to the VFIO (Virtual Function I/O) driver, obtain the values * You have installed the {oc-first}. .Procedure -. Run the `lspci` command to obtain the `vendor-ID` and the `device-ID` for the PCI device. +. Run the `lspci` command to obtain the `vendor-ID` and the `device-ID` for the PCI device: + [source,terminal] ---- $ lspci -nnv | grep -i nvidia ---- + -.Example output +Example output: ++ [source,terminal] ---- 02:01.0 3D controller [0302]: NVIDIA Corporation GV100GL [Tesla V100 PCIe 32GB] [10de:1eb8] (rev a1) @@ -32,7 +34,8 @@ $ lspci -nnv | grep -i nvidia include::snippets/butane-version.adoc[] ==== + -.Example +For example: ++ [source,yaml,subs="attributes+"] ---- variant: openshift @@ -73,14 +76,15 @@ $ butane 100-worker-vfiopci.bu -o 100-worker-vfiopci.yaml $ oc apply -f 100-worker-vfiopci.yaml ---- -. Verify that the `MachineConfig` object was added. +. Verify that the `MachineConfig` object was added: + [source,terminal] ---- $ oc get MachineConfig ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE @@ -95,15 +99,17 @@ NAME GENERATEDBYCONTROLLER IGNI ---- .Verification -* Verify that the VFIO driver is loaded. 
+* Verify that the VFIO driver is loaded: + [source,terminal] ---- $ lspci -nnk -d 10de: ---- ++ The output confirms that the VFIO driver is being used. + -.Example output +Example output: ++ ---- 04:00.0 3D controller [0302]: NVIDIA Corporation GP102GL [Tesla P40] [10de:1eb8] (rev a1) Subsystem: NVIDIA Corporation Device [10de:1eb8] diff --git a/modules/virt-booting-vms-uefi-mode.adoc b/modules/virt-booting-vms-uefi-mode.adoc index 0550cb9f7f56..35019ba6b1e8 100644 --- a/modules/virt-booting-vms-uefi-mode.adoc +++ b/modules/virt-booting-vms-uefi-mode.adoc @@ -16,7 +16,8 @@ You can configure a virtual machine to boot in UEFI mode by editing the `Virtual . Edit or create a `VirtualMachine` manifest file. Use the `spec.firmware.bootloader` stanza to configure UEFI mode: + -.Booting in UEFI mode with secure boot active +For example, to boot in UEFI mode with secure boot active: ++ [source,yaml] ---- apiversion: kubevirt.io/v1 diff --git a/modules/virt-cdi-supported-operations-matrix.adoc b/modules/virt-cdi-supported-operations-matrix.adoc index 9db52fd486d3..5f70f27d98a7 100644 --- a/modules/virt-cdi-supported-operations-matrix.adoc +++ b/modules/virt-cdi-supported-operations-matrix.adoc @@ -20,45 +20,65 @@ This matrix shows the supported CDI operations for content types against endpoin |Content types | HTTP | HTTPS | HTTP basic auth | Registry | Upload | KubeVirt (QCOW2) -|✓ QCOW2 + -✓ GZ* + +a|✓ QCOW2 + +✓ GZ* + ✓ XZ* -|✓ QCOW2** + -✓ GZ* + +a|✓ QCOW2** + +✓ GZ* + ✓ XZ* -|✓ QCOW2 + -✓ GZ* + +a|✓ QCOW2 + +✓ GZ* + ✓ XZ* -| ✓ QCOW2* + -□ GZ + +a| ✓ QCOW2* + +□ GZ + □ XZ -| ✓ QCOW2* + -✓ GZ* + +| ✓ QCOW2* + +✓ GZ* + ✓ XZ* | KubeVirt (RAW) -|✓ RAW + -✓ GZ + +a|✓ RAW + +✓ GZ + ✓ XZ -|✓ RAW + -✓ GZ + +a|✓ RAW + +✓ GZ + ✓ XZ -| ✓ RAW + -✓ GZ + +a| ✓ RAW + +✓ GZ + ✓ XZ -| ✓ RAW* + -□ GZ + +a| ✓ RAW* + +□ GZ + □ XZ -| ✓ RAW* + -✓ GZ* + +a| ✓ RAW* + +✓ GZ* + ✓ XZ* |=== diff --git a/modules/virt-checking-cluster-dpdk-readiness.adoc b/modules/virt-checking-cluster-dpdk-readiness.adoc index a9bac8133cf0..51aa980fd313 100644 --- a/modules/virt-checking-cluster-dpdk-readiness.adoc +++ b/modules/virt-checking-cluster-dpdk-readiness.adoc @@ -24,11 +24,9 @@ You run a DPDK checkup by performing the following steps: .Procedure -. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest for the DPDK checkup: +. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest for the DPDK checkup. For example: + -.Example service account, role, and rolebinding manifest file [%collapsible] -==== [source,yaml] ---- --- @@ -85,7 +83,6 @@ roleRef: kind: Role name: kubevirt-dpdk-checker ---- -==== . Apply the `ServiceAccount`, `Role`, and `RoleBinding` manifest: + @@ -94,9 +91,8 @@ roleRef: $ oc apply -n -f .yaml ---- -. Create a `ConfigMap` manifest that contains the input parameters for the checkup: +. Create a `ConfigMap` manifest that contains the input parameters for the checkup. For example: + -.Example input config map [source,yaml] ---- apiVersion: v1 @@ -122,9 +118,8 @@ data: $ oc apply -n -f .yaml ---- -. Create a `Job` manifest to run the checkup: +. Create a `Job` manifest to run the checkup. 
For example: + -.Example job manifest [source,yaml,subs="attributes+"] ---- apiVersion: batch/v1 @@ -182,7 +177,8 @@ $ oc wait job dpdk-checkup -n --for condition=complete --time $ oc get configmap dpdk-checkup-config -n -o yaml ---- + -.Example output config map (success) +Example output config map (success): ++ [source,yaml] ---- apiVersion: v1 diff --git a/modules/virt-checking-storage-configuration.adoc b/modules/virt-checking-storage-configuration.adoc index d5fb5739d977..f9aabb5ce59d 100644 --- a/modules/virt-checking-storage-configuration.adoc +++ b/modules/virt-checking-storage-configuration.adoc @@ -32,11 +32,9 @@ subjects: .Procedure -. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest file for the storage checkup: +. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest file for the storage checkup. For example: + -.Example service account, role, and rolebinding manifest [%collapsible] -==== [source,yaml] ---- --- @@ -84,7 +82,6 @@ roleRef: kind: Role name: storage-checkup-role ---- -==== . Apply the `ServiceAccount`, `Role`, and `RoleBinding` manifest in the target namespace: + @@ -93,9 +90,8 @@ roleRef: $ oc apply -n -f .yaml ---- -. Create a `ConfigMap` and `Job` manifest file. The config map contains the input parameters for the checkup job. +. Create a `ConfigMap` and `Job` manifest file. The config map contains the input parameters for the checkup job. For example: + -.Example input config map and job manifest [source,yaml,subs="attributes+"] ---- --- @@ -152,7 +148,8 @@ $ oc wait job storage-checkup -n --for condition=complete --t $ oc get configmap storage-checkup-config -n -o yaml ---- + -.Example output config map (success) +Example output config map (success): ++ [source,yaml,subs="attributes+"] ---- apiVersion: v1 diff --git a/modules/virt-cloning-a-datavolume.adoc b/modules/virt-cloning-a-datavolume.adoc index c776c4f1ffeb..1c571dc0521a 100644 --- a/modules/virt-cloning-a-datavolume.adoc +++ b/modules/virt-cloning-a-datavolume.adoc @@ -15,7 +15,8 @@ You can smart-clone a persistent volume claim (PVC) by using the command line to * The source and target PVCs must have the same storage provider and volume mode. * The value of the `driver` key of the `VolumeSnapshotClass` object must match the value of the `provisioner` key of the `StorageClass` object as shown in the following example: + -.Example `VolumeSnapshotClass` object +Example `VolumeSnapshotClass` object: ++ [source,yaml] ---- kind: VolumeSnapshotClass @@ -24,7 +25,8 @@ driver: openshift-storage.rbd.csi.ceph.com # ... ---- + -.Example `StorageClass` object +Example `StorageClass` object: ++ [source,yaml] ---- kind: StorageClass diff --git a/modules/virt-cloning-pvc-to-dv-cli.adoc b/modules/virt-cloning-pvc-to-dv-cli.adoc index b2a8e6d8d30a..a26dda39c57a 100644 --- a/modules/virt-cloning-pvc-to-dv-cli.adoc +++ b/modules/virt-cloning-pvc-to-dv-cli.adoc @@ -31,7 +31,8 @@ endif::openshift-rosa,openshift-dedicated[] ** The source and target PVCs must have the same storage provider and volume mode. ** The value of the `driver` key of the `VolumeSnapshotClass` object must match the value of the `provisioner` key of the `StorageClass` object as shown in the following example: + -.Example `VolumeSnapshotClass` object +Example `VolumeSnapshotClass` object: ++ [source,yaml] ---- kind: VolumeSnapshotClass @@ -40,7 +41,8 @@ driver: openshift-storage.rbd.csi.ceph.com # ... 
---- + -.Example `StorageClass` object +Example `StorageClass` object: ++ [source,yaml] ---- kind: StorageClass diff --git a/modules/virt-cluster-resource-requirements.adoc b/modules/virt-cluster-resource-requirements.adoc index 5827a41c1b22..ad03d07688c2 100644 --- a/modules/virt-cluster-resource-requirements.adoc +++ b/modules/virt-cluster-resource-requirements.adoc @@ -13,26 +13,25 @@ The numbers noted in this documentation are based on Red Hat's test methodology and setup. These numbers can vary based on your own individual setup and environments. ==== -[discrete] [id="memory-overhead_{context}"] == Memory overhead Calculate the memory overhead values for {VirtProductName} by using the equations below. -.Cluster memory overhead - +Cluster memory overhead:: ++ ---- Memory overhead per infrastructure node ≈ 150 MiB ---- - ++ ---- Memory overhead per worker node ≈ 360 MiB ---- - ++ Additionally, {VirtProductName} environment resources require a total of 2179 MiB of RAM that is spread across all infrastructure nodes. -.Virtual machine memory overhead - +Virtual machine memory overhead:: ++ ---- Memory overhead per virtual machine ≈ (1.002 × requested memory) \ + 218 MiB \ <1> @@ -48,48 +47,46 @@ Memory overhead per virtual machine ≈ (1.002 × requested memory) \ * If Secure Encrypted Virtualization (SEV) is enabled, add 256 MiB. * If Trusted Platform Module (TPM) is enabled, add 53 MiB. -[discrete] [id="CPU-overhead_{context}"] == CPU overhead Calculate the cluster processor overhead requirements for {VirtProductName} by using the equation below. The CPU overhead per virtual machine depends on your individual setup. -.Cluster CPU overhead - +Cluster CPU overhead:: ++ ---- CPU overhead for infrastructure nodes ≈ 4 cores ---- - ++ {VirtProductName} increases the overall utilization of cluster level services such as logging, routing, and monitoring. To account for this workload, ensure that nodes that host infrastructure components have capacity allocated for 4 additional cores (4000 millicores) distributed across those nodes. - ++ ---- CPU overhead for worker nodes ≈ 2 cores + CPU overhead per virtual machine ---- - ++ Each worker node that hosts virtual machines must have capacity for 2 additional cores (2000 millicores) for {VirtProductName} management workloads in addition to the CPUs required for virtual machine workloads. -.Virtual machine CPU overhead - +Virtual machine CPU overhead:: ++ If dedicated CPUs are requested, there is a 1:1 impact on the cluster CPU overhead requirement. Otherwise, there are no specific rules about how many CPUs a virtual machine requires. -[discrete] [id="storage-overhead_{context}"] == Storage overhead Use the guidelines below to estimate storage overhead requirements for your {VirtProductName} environment. -.Cluster storage overhead - +Cluster storage overhead:: ++ ---- Aggregated storage overhead per node ≈ 10 GiB ---- - ++ 10 GiB is the estimated on-disk storage impact for each node in the cluster when you install {VirtProductName}. -.Virtual machine storage overhead - +Virtual machine storage overhead:: ++ Storage overhead per virtual machine depends on specific requests for resource allocation within the virtual machine. The request could be for ephemeral storage on the node or storage resources hosted elsewhere in the cluster. {VirtProductName} does not currently allocate any additional ephemeral storage for the running container itself. 
-.Example - +Example:: ++ As a cluster administrator, if you plan to host 10 virtual machines in the cluster, each with 1 GiB of RAM and 2 vCPUs, the memory impact across the cluster is 11.68 GiB. The estimated on-disk storage impact for each node in the cluster is 10 GiB and the CPU impact for worker nodes that host virtual machine workloads is a minimum of 2 cores. diff --git a/modules/virt-configure-multiple-iothreads.adoc b/modules/virt-configure-multiple-iothreads.adoc index 8a418ea6bfbe..468c05ad2684 100644 --- a/modules/virt-configure-multiple-iothreads.adoc +++ b/modules/virt-configure-multiple-iothreads.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="virt-configure-multiple-iothreads_{context}"] -== Configuring multiple IOThreads for fast storage access += Configuring multiple IOThreads for fast storage access You can improve storage performance by configuring multiple IOThreads for a virtual machine (VM) that uses fast storage, such as solid-state drive (SSD) or non-volatile memory express (NVMe). This configuration option is only available by editing YAML of the VM. @@ -37,10 +37,11 @@ domain: bus: virtio # ... ---- - -. Click *Save*. - ++ [IMPORTANT] ==== The `spec.template.spec.domain` setting cannot be changed while the VM is running. You must stop the VM before applying the changes, and then restart the VM for the new settings to take effect. ==== + +. Click *Save*. + diff --git a/modules/virt-configuring-cluster-dpdk.adoc b/modules/virt-configuring-cluster-dpdk.adoc index 911574758f8d..c795dd3bd34b 100644 --- a/modules/virt-configuring-cluster-dpdk.adoc +++ b/modules/virt-configuring-cluster-dpdk.adoc @@ -35,9 +35,8 @@ $ rosa edit machinepool --cluster= node-role.kube ---- endif::openshift-rosa[] -.. Create a new `MachineConfigPool` manifest that contains the `worker-dpdk` label in the `spec.machineConfigSelector` object: +.. Create a new `MachineConfigPool` manifest that contains the `worker-dpdk` label in the `spec.machineConfigSelector` object. For example: + -.Example `MachineConfigPool` manifest [source,yaml] ---- apiVersion: machineconfiguration.openshift.io/v1 @@ -59,9 +58,8 @@ spec: node-role.kubernetes.io/worker-dpdk: "" ---- -. Create a `PerformanceProfile` manifest that applies to the labeled nodes and the machine config pool that you created in the previous steps. The performance profile specifies the CPUs that are isolated for DPDK applications and the CPUs that are reserved for house keeping. +. Create a `PerformanceProfile` manifest that applies to the labeled nodes and the machine config pool that you created in the previous steps. The performance profile specifies the CPUs that are isolated for DPDK applications and the CPUs that are reserved for house keeping. For example: + -.Example `PerformanceProfile` manifest [source,yaml] ---- apiVersion: performance.openshift.io/v2 @@ -126,9 +124,8 @@ Enabling `AlignCPUs` allows {VirtProductName} to request up to two additional de emulator thread isolation. ==== -. Create an `SriovNetworkNodePolicy` object with the `spec.deviceType` field set to `vfio-pci`: +. Create an `SriovNetworkNodePolicy` object with the `spec.deviceType` field set to `vfio-pci`. 
For example: + -.Example `SriovNetworkNodePolicy` manifest [source,yaml] ---- apiVersion: sriovnetwork.openshift.io/v1 diff --git a/modules/virt-configuring-cluster-eviction-strategy-cli.adoc b/modules/virt-configuring-cluster-eviction-strategy-cli.adoc index 57d3099db496..e71eda196bb1 100644 --- a/modules/virt-configuring-cluster-eviction-strategy-cli.adoc +++ b/modules/virt-configuring-cluster-eviction-strategy-cli.adoc @@ -23,7 +23,6 @@ $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} . Set the cluster eviction strategy as shown in the following example: + -.Example cluster eviction strategy [source,yaml] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-configuring-cluster-real-time.adoc b/modules/virt-configuring-cluster-real-time.adoc index 99b3a3e84147..9c9ab6b39457 100644 --- a/modules/virt-configuring-cluster-real-time.adoc +++ b/modules/virt-configuring-cluster-real-time.adoc @@ -27,9 +27,8 @@ $ oc label node node-role.kubernetes.io/worker-realtime="" You must use the default `master` role for {sno} and compact clusters. ==== -. Create a new `MachineConfigPool` manifest that contains the `worker-realtime` label in the `spec.machineConfigSelector` object: +. Create a new `MachineConfigPool` manifest that contains the `worker-realtime` label in the `spec.machineConfigSelector` object. For example: + -.Example `MachineConfigPool` manifest [source,yaml] ---- apiVersion: machineconfiguration.openshift.io/v1 @@ -63,9 +62,8 @@ You do not need to create a new `MachineConfigPool` manifest for {sno} and compa $ oc apply -f .yaml ---- -. Create a `PerformanceProfile` manifest that applies to the labeled nodes and the machine config pool that you created in the previous steps: +. Create a `PerformanceProfile` manifest that applies to the labeled nodes and the machine config pool that you created in the previous steps. For example: + -.Example `PerformanceProfile` manifest [source,yaml] ---- apiVersion: performance.openshift.io/v2 @@ -137,4 +135,4 @@ $ oc patch hyperconverged kubevirt-hyperconverged -n {CNVNamespace} \ ==== Enabling `alignCPUs` allows {VirtProductName} to request up to two additional dedicated CPUs to bring the total CPU count to an even parity when using emulator thread isolation. -==== \ No newline at end of file +==== diff --git a/modules/virt-configuring-downward-metrics.adoc b/modules/virt-configuring-downward-metrics.adoc index 61278773bde0..b2b5411cf286 100644 --- a/modules/virt-configuring-downward-metrics.adoc +++ b/modules/virt-configuring-downward-metrics.adoc @@ -16,7 +16,6 @@ You enable the capturing of downward metrics for a host VM by creating a configu * Edit or create a YAML file that includes a `downwardMetrics` device, as shown in the following example: + -.Example downwardMetrics configuration file [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-interface-link-state.adoc b/modules/virt-configuring-interface-link-state.adoc index 33079c899300..ec70a94c5cfe 100644 --- a/modules/virt-configuring-interface-link-state.adoc +++ b/modules/virt-configuring-interface-link-state.adoc @@ -55,14 +55,15 @@ $ oc apply -f .yaml ---- .Verification -* Verify that the desired link state is set by checking the `status.interfaces.linkState` field of the `VirtualMachineInstance` manifest. 
+* Verify that the desired link state is set by checking the `status.interfaces.linkState` field of the `VirtualMachineInstance` manifest: + [source,terminal] ---- $ oc get vmi ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-live-migration-heavy.adoc b/modules/virt-configuring-live-migration-heavy.adoc index 8f99fc11363a..2078429049bb 100644 --- a/modules/virt-configuring-live-migration-heavy.adoc +++ b/modules/virt-configuring-live-migration-heavy.adoc @@ -28,7 +28,8 @@ Configure live migration for heavy workloads by updating the `HyperConverged` cu $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- + -.Example configuration file +For example: ++ [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1beta1 @@ -51,10 +52,10 @@ spec: <4> Maximum number of outbound migrations per node. Configure a single VM per node for heavy workloads. <5> The migration is canceled if memory copy fails to make progress in this time. This value is measured in seconds. Increase this parameter for large memory sizes running heavy workloads. <6> Use post copy mode when memory dirty rates are high to ensure the migration converges. Set `allowPostCopy` to `true` to enable post copy mode. - -. Optional: If your main network is too busy for the migration, configure a secondary, dedicated migration network. - ++ [NOTE] ==== Post copy mode can impact performance during the transfer, and should not be used for critical data, or with unstable networks. ==== + +. Optional: If your main network is too busy for the migration, configure a secondary, dedicated migration network. diff --git a/modules/virt-configuring-live-migration-limits.adoc b/modules/virt-configuring-live-migration-limits.adoc index 1a810c0598c7..915e487a7996 100644 --- a/modules/virt-configuring-live-migration-limits.adoc +++ b/modules/virt-configuring-live-migration-limits.adoc @@ -23,7 +23,8 @@ Configure live migration limits and timeouts for the cluster by updating the `Hy $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- + -.Example configuration file +For example: ++ [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1beta1 @@ -46,7 +47,7 @@ spec: <4> Maximum number of outbound migrations per node. Default: `2`. <5> The migration is canceled if memory copy fails to make progress in this time, in seconds. Default: `150`. <6> If a VM is running a heavy workload and the memory dirty rate is too high, this can prevent the migration from one node to another from converging. To prevent this, you can enable post copy mode. By default, `allowPostCopy` is set to `false`. - ++ [NOTE] ==== You can restore the default value for any `spec.liveMigrationConfig` field by deleting that key/value pair and saving the file. For example, delete `progressTimeout: ` to restore the default `progressTimeout: 150`. 
diff --git a/modules/virt-configuring-runstrategy-vm.adoc b/modules/virt-configuring-runstrategy-vm.adoc index 7eb939f8a8fc..0beb31261355 100644 --- a/modules/virt-configuring-runstrategy-vm.adoc +++ b/modules/virt-configuring-runstrategy-vm.adoc @@ -21,7 +21,8 @@ You can configure a run strategy for a virtual machine (VM) by using the command $ oc edit vm -n ---- + -.Example run strategy +For example: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-secondary-dns-server.adoc b/modules/virt-configuring-secondary-dns-server.adoc index 90e88244021e..bcf5cc290790 100644 --- a/modules/virt-configuring-secondary-dns-server.adoc +++ b/modules/virt-configuring-secondary-dns-server.adoc @@ -56,7 +56,8 @@ $ oc expose -n {CNVNamespace} deployment/secondary-dns --name=dns-lb \ $ oc get service -n {CNVNamespace} ---- + -.Example output +Example output: ++ [source,text] ---- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE @@ -96,7 +97,8 @@ spec: $ oc get dnses.config.openshift.io cluster -o jsonpath='{.spec.baseDomain}' ---- + -.Example output +Example output: ++ [source,text] ---- openshift.example.com diff --git a/modules/virt-configuring-secondary-network-vm-live-migration.adoc b/modules/virt-configuring-secondary-network-vm-live-migration.adoc index 499861c03a56..b03ead11b474 100644 --- a/modules/virt-configuring-secondary-network-vm-live-migration.adoc +++ b/modules/virt-configuring-secondary-network-vm-live-migration.adoc @@ -20,7 +20,6 @@ To configure a dedicated secondary network for live migration, you must first cr . Create a `NetworkAttachmentDefinition` manifest according to the following example: + -.Example configuration file [source,yaml,subs="attributes+"] ---- apiVersion: "k8s.cni.cncf.io/v1" @@ -53,9 +52,8 @@ spec: $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- -. Add the name of the `NetworkAttachmentDefinition` object to the `spec.liveMigrationConfig` stanza of the `HyperConverged` CR: +. Add the name of the `NetworkAttachmentDefinition` object to the `spec.liveMigrationConfig` stanza of the `HyperConverged` CR. For example: + -.Example `HyperConverged` manifest [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-configuring-vm-dpdk.adoc b/modules/virt-configuring-vm-dpdk.adoc index 030f5671e100..ef90beb320d9 100644 --- a/modules/virt-configuring-vm-dpdk.adoc +++ b/modules/virt-configuring-vm-dpdk.adoc @@ -14,9 +14,8 @@ You can run Data Packet Development Kit (DPDK) workloads on virtual machines (VM * You have installed the {oc-first}. .Procedure -. Edit the `VirtualMachine` manifest to include information about the SR-IOV network interface, CPU topology, CRI-O annotations, and huge pages: +. Edit the `VirtualMachine` manifest to include information about the SR-IOV network interface, CPU topology, CRI-O annotations, and huge pages. 
For example: + -.Example `VirtualMachine` manifest [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-vm-eviction-strategy-cli.adoc b/modules/virt-configuring-vm-eviction-strategy-cli.adoc index 90e987cd6a4f..e4aa7b86d991 100644 --- a/modules/virt-configuring-vm-eviction-strategy-cli.adoc +++ b/modules/virt-configuring-vm-eviction-strategy-cli.adoc @@ -28,7 +28,8 @@ You must set the eviction strategy of non-migratable VMs to `LiveMigrateIfPossib $ oc edit vm -n ---- + -.Example eviction strategy +For example: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-vm-project-dpdk.adoc b/modules/virt-configuring-vm-project-dpdk.adoc index 4a7107ad1640..d8a128fe5239 100644 --- a/modules/virt-configuring-vm-project-dpdk.adoc +++ b/modules/virt-configuring-vm-project-dpdk.adoc @@ -20,9 +20,8 @@ You can configure the project to run DPDK workloads on SR-IOV hardware. $ oc create ns dpdk-checkup-ns ---- -. Create an `SriovNetwork` object that references the `SriovNetworkNodePolicy` object. When you create an `SriovNetwork` object, the SR-IOV Network Operator automatically creates a `NetworkAttachmentDefinition` object. +. Create an `SriovNetwork` object that references the `SriovNetworkNodePolicy` object. When you create an `SriovNetwork` object, the SR-IOV Network Operator automatically creates a `NetworkAttachmentDefinition` object. For example: + -.Example `SriovNetwork` manifest [source,yaml] ---- apiVersion: sriovnetwork.openshift.io/v1 diff --git a/modules/virt-configuring-vm-real-time.adoc b/modules/virt-configuring-vm-real-time.adoc index 8f1e340e8078..9680a7a69391 100644 --- a/modules/virt-configuring-vm-real-time.adoc +++ b/modules/virt-configuring-vm-real-time.adoc @@ -14,9 +14,8 @@ You can configure a virtual machine (VM) to run real-time workloads. * You have installed the {oc-first}. .Procedure -. Create a `VirtualMachine` manifest to include information about CPU topology, CRI-O annotations, and huge pages: +. Create a `VirtualMachine` manifest to include information about CPU topology, CRI-O annotations, and huge pages. For example: + -.Example `VirtualMachine` manifest [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -173,12 +172,16 @@ isolate_managed_irq=Y <2> ---- # cyclictest --priority 1 --policy fifo -h 50 -a 2-3 --mainaffinity 0,1 -t 2 -m -q -i 200 -D 12h ---- ++ +-- where: + `-a`:: Specifies the CPU set on which the test runs. This is the same as the isolated CPUs that you configured in the `realtime-variables.conf` file. `-D`:: Specifies the test duration. Append `m`, `h`, or `d` to specify minutes, hours or days. - +-- ++ +Example output: + -.Example output [source,terminal] ---- # Min Latencies: 00004 00004 diff --git a/modules/virt-configuring-vm-use-usb-device.adoc b/modules/virt-configuring-vm-use-usb-device.adoc index 5aca99dc2360..65ed85e38978 100644 --- a/modules/virt-configuring-vm-use-usb-device.adoc +++ b/modules/virt-configuring-vm-use-usb-device.adoc @@ -30,7 +30,6 @@ $ oc edit vmi vmi-usb . 
Edit the CR by adding a USB device, as shown in the following example: + -.Example configuration [source, yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-configuring-vm-with-node-exporter-service.adoc b/modules/virt-configuring-vm-with-node-exporter-service.adoc index 512a4086c8a7..4295827b4e61 100644 --- a/modules/virt-configuring-vm-with-node-exporter-service.adoc +++ b/modules/virt-configuring-vm-with-node-exporter-service.adoc @@ -67,7 +67,8 @@ $ sudo systemctl start node_exporter.service $ curl http://localhost:9100/metrics ---- + -.Example output +Example output: ++ [source,terminal] ---- go_gc_duration_seconds{quantile="0"} 1.5244e-05 diff --git a/modules/virt-connecting-secondary-network-ssh.adoc b/modules/virt-connecting-secondary-network-ssh.adoc index d43f5ee97dfc..5ef559dd384e 100644 --- a/modules/virt-connecting-secondary-network-ssh.adoc +++ b/modules/virt-connecting-secondary-network-ssh.adoc @@ -23,7 +23,8 @@ You can connect to a virtual machine (VM) attached to a secondary network by usi $ oc describe vm -n ---- + -.Example output +Example output: ++ ---- # ... Interfaces: @@ -44,7 +45,8 @@ Interfaces: $ ssh @ -i ---- + -.Example +For example: ++ [source,terminal] ---- $ ssh cloud-user@10.244.0.37 -i ~/.ssh/id_rsa_cloud-user diff --git a/modules/virt-connecting-vm-internal-fqdn.adoc b/modules/virt-connecting-vm-internal-fqdn.adoc index e2b12a65ce2a..5893334e79eb 100644 --- a/modules/virt-connecting-vm-internal-fqdn.adoc +++ b/modules/virt-connecting-vm-internal-fqdn.adoc @@ -29,7 +29,8 @@ $ virtctl console vm-fedora $ ping myvm.mysubdomain..svc.cluster.local ---- + -.Example output +Example output: ++ [source,terminal] ---- PING myvm.mysubdomain.default.svc.cluster.local (10.244.0.57) 56(84) bytes of data. diff --git a/modules/virt-connecting-vm-secondarynw-using-fqdn.adoc b/modules/virt-connecting-vm-secondarynw-using-fqdn.adoc index c895ebbca914..45317ab4af41 100644 --- a/modules/virt-connecting-vm-secondarynw-using-fqdn.adoc +++ b/modules/virt-connecting-vm-secondarynw-using-fqdn.adoc @@ -32,7 +32,8 @@ $ oc get dnses.config.openshift.io cluster -o json | jq .spec.baseDomain $ oc get vm -n -o yaml ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-creating-a-primary-cluster-udn.adoc b/modules/virt-creating-a-primary-cluster-udn.adoc index c15fc03f1321..b145bbdcece8 100644 --- a/modules/virt-creating-a-primary-cluster-udn.adoc +++ b/modules/virt-creating-a-primary-cluster-udn.adoc @@ -13,9 +13,8 @@ You can connect multiple namespaces to the same primary user-defined network (UD * You have installed the {oc-first}. .Procedure -. Create a `ClusterUserDefinedNetwork` object to specify the custom network configuration: +. Create a `ClusterUserDefinedNetwork` object to specify the custom network configuration. For example: + -.Example `ClusterUserDefinedNetwork` manifest [source,yaml] ---- kind: ClusterUserDefinedNetwork diff --git a/modules/virt-creating-a-primary-udn.adoc b/modules/virt-creating-a-primary-udn.adoc index 16940913a347..2defd0e0c551 100644 --- a/modules/virt-creating-a-primary-udn.adoc +++ b/modules/virt-creating-a-primary-udn.adoc @@ -13,9 +13,8 @@ You can create an isolated primary network in your project namespace by using th * You have created a namespace and applied the `k8s.ovn.org/primary-user-defined-network` label. .Procedure -. Create a `UserDefinedNetwork` object to specify the custom network configuration: +. 
Create a `UserDefinedNetwork` object to specify the custom network configuration. For example: + -.Example `UserDefinedNetwork` manifest [source,yaml] ---- apiVersion: k8s.ovn.org/v1 diff --git a/modules/virt-creating-and-exposing-mediated-devices.adoc b/modules/virt-creating-and-exposing-mediated-devices.adoc index 64308d2addc1..2e900f40c467 100644 --- a/modules/virt-creating-and-exposing-mediated-devices.adoc +++ b/modules/virt-creating-and-exposing-mediated-devices.adoc @@ -24,9 +24,9 @@ As an administrator, you can create mediated devices and expose them to the clus $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- + -.Example configuration file with mediated devices configured +For example: ++ [%collapsible] -==== [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1 @@ -51,11 +51,9 @@ spec: resourceName: nvidia.com/GRID_T4-8Q # ... ---- -==== -. Create mediated devices by adding them to the `spec.mediatedDevicesConfiguration` stanza: +. Create mediated devices by adding them to the `spec.mediatedDevicesConfiguration` stanza. For example: + -.Example YAML snippet [source,yaml] ---- # ... @@ -96,9 +94,8 @@ $ oc get $NODE -o json \ For example, the name file for the `nvidia-231` type contains the selector string `GRID T4-2Q`. Using `GRID T4-2Q` as the `mdevNameSelector` value allows nodes to use the `nvidia-231` type. . Expose the mediated devices to the cluster by adding the `mdevNameSelector` and `resourceName` values to the -`spec.permittedHostDevices.mediatedDevices` stanza of the `HyperConverged` CR: +`spec.permittedHostDevices.mediatedDevices` stanza of the `HyperConverged` CR. For example: + -.Example YAML snippet [source,yaml] ---- # ... diff --git a/modules/virt-creating-secondary-localnet-udn.adoc b/modules/virt-creating-secondary-localnet-udn.adoc index 38ea69594abd..8db6c0664160 100644 --- a/modules/virt-creating-secondary-localnet-udn.adoc +++ b/modules/virt-creating-secondary-localnet-udn.adoc @@ -14,9 +14,8 @@ You can create a secondary cluster-scoped user-defined-network (CUDN) for the lo * You installed the Kubernetes NMState Operator. .Procedure -. Create a `NodeNetworkConfigurationPolicy` object to map the OVN-Kubernetes secondary network to an Open vSwitch (OVS) bridge: +. Create a `NodeNetworkConfigurationPolicy` object to map the OVN-Kubernetes secondary network to an Open vSwitch (OVS) bridge. For example: + -.Example `NodeNetworkConfigurationPolicy` manifest [source,yaml] ---- apiVersion: nmstate.io/v1 @@ -55,9 +54,8 @@ where: :: Specifies the name of your `NodeNetworkConfigurationPolicy` manifest YAML file. -. Create a `ClusterUserDefinedNetwork` object to create a localnet secondary network: +. Create a `ClusterUserDefinedNetwork` object to create a localnet secondary network. For example: + -.Example `ClusterUserDefinedNetwork` manifest [source,yaml] ---- apiVersion: k8s.ovn.org/v1 diff --git a/modules/virt-creating-secondary-udn-namespace.adoc b/modules/virt-creating-secondary-udn-namespace.adoc index 1b6dc7d36834..944fe3e1d9df 100644 --- a/modules/virt-creating-secondary-udn-namespace.adoc +++ b/modules/virt-creating-secondary-udn-namespace.adoc @@ -16,7 +16,6 @@ You can create a namespace to be used with an existing secondary cluster-scoped .Procedure . 
Create a `Namespace` object similar to the following example: + -.Example `Namespace` manifest [source,yaml] ---- apiVersion: v1 diff --git a/modules/virt-creating-service-virtctl.adoc b/modules/virt-creating-service-virtctl.adoc index 7482105d9269..03fae27ab58d 100644 --- a/modules/virt-creating-service-virtctl.adoc +++ b/modules/virt-creating-service-virtctl.adoc @@ -24,7 +24,7 @@ $ virtctl expose vm --name --type --port ---- <1> Specify the `ClusterIP`, `NodePort`, or `LoadBalancer` service type. + -.Example +For example: + [source,terminal] ---- @@ -38,4 +38,4 @@ $ virtctl expose vm example-vm --name example-service --type NodePort --port 22 [source,terminal] ---- $ oc get service ----- \ No newline at end of file +---- diff --git a/modules/virt-creating-virtualmachineexport.adoc b/modules/virt-creating-virtualmachineexport.adoc index d3f0dea1b153..0d45727c900f 100644 --- a/modules/virt-creating-virtualmachineexport.adoc +++ b/modules/virt-creating-virtualmachineexport.adoc @@ -30,7 +30,6 @@ The export server supports the following file formats: . Create a `VirtualMachineExport` manifest to export a volume from a `VirtualMachine`, `VirtualMachineSnapshot`, or `PersistentVolumeClaim` CR according to the following example and save it as `example-export.yaml`: + -.`VirtualMachineExport` example [source,yaml] ---- apiVersion: export.kubevirt.io/v1beta1 @@ -66,9 +65,8 @@ $ oc create -f example-export.yaml $ oc get vmexport example-export -o yaml ---- + -The internal and external links for the exported volumes are displayed in the `status` stanza: +The internal and external links for the exported volumes are displayed in the `status` stanza, for example: + -.Output example [source,yaml] ---- apiVersion: export.kubevirt.io/v1beta1 diff --git a/modules/virt-creating-vm-cli.adoc b/modules/virt-creating-vm-cli.adoc index 70646c4813c1..b790243d6f7b 100644 --- a/modules/virt-creating-vm-cli.adoc +++ b/modules/virt-creating-vm-cli.adoc @@ -29,7 +29,6 @@ $ virtctl create vm --name rhel-9-minimal --volume-import type:ds,src:openshift- This example manifest does not configure VM authentication. ==== + -.Example manifest for a {op-system-base} VM [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-creating-vm-container-disk-cli.adoc b/modules/virt-creating-vm-container-disk-cli.adoc index db84421dc2d5..de6ae64248d8 100644 --- a/modules/virt-creating-vm-container-disk-cli.adoc +++ b/modules/virt-creating-vm-container-disk-cli.adoc @@ -71,9 +71,8 @@ $ oc create -f .yaml $ oc get vm ---- + -If the provisioning is successful, the VM status is `Running`: +If the provisioning is successful, the VM status is `Running`. For example: + -.Example output [source,terminal] ---- NAME AGE STATUS READY @@ -89,7 +88,6 @@ $ virtctl console + If the VM is running and the serial console is accessible, the output looks as follows: + -.Example output [source,terminal] ---- Successfully connected to vm-rhel-9 console. The escape sequence is ^] diff --git a/modules/virt-creating-vm-instancetype.adoc b/modules/virt-creating-vm-instancetype.adoc index d37e2d80e727..cfcee8ba8259 100644 --- a/modules/virt-creating-vm-instancetype.adoc +++ b/modules/virt-creating-vm-instancetype.adoc @@ -116,7 +116,7 @@ endif::[] . Optional: Click *View YAML & CLI* to view the YAML file. Click *CLI* to view the CLI commands. You can also download or copy either the YAML file contents or the CLI commands. . Click *Create VirtualMachine*. 
- +.Result After the VM is created, you can monitor the status on the *VirtualMachine details* page. ifeval::["{context}" == "virt-creating-vms"] @@ -127,4 +127,4 @@ ifeval::["{context}" == "static-key"] endif::[] ifeval::["{context}" == "dynamic-key"] :!dynamic-key: -endif::[] \ No newline at end of file +endif::[] diff --git a/modules/virt-creating-vm-snapshot-cli.adoc b/modules/virt-creating-vm-snapshot-cli.adoc index 818704909ee0..ed05c001389d 100644 --- a/modules/virt-creating-vm-snapshot-cli.adoc +++ b/modules/virt-creating-vm-snapshot-cli.adoc @@ -17,7 +17,8 @@ You can create a virtual machine (VM) snapshot for an offline or online VM by cr $ oc get kubevirt kubevirt-hyperconverged -n {CNVNamespace} -o yaml ---- + -.Truncated output +Truncated output: ++ [source,yaml] ---- spec: @@ -93,7 +94,8 @@ If you do not specify a unit of time such as `m` or `s`, the default is seconds $ oc describe vmsnapshot ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: snapshot.kubevirt.io/v1beta1 @@ -146,4 +148,4 @@ status: <5> Specifies additional information about the snapshot, such as whether it is an online snapshot, or whether it was created with QEMU guest agent running. <6> Lists the storage volumes that are part of the snapshot, as well as their parameters. -. Check the `includedVolumes` section in the snapshot description to verify that the expected PVCs are included in the snapshot. \ No newline at end of file +. Check the `includedVolumes` section in the snapshot description to verify that the expected PVCs are included in the snapshot. diff --git a/modules/virt-creating-vm-web-page-cli.adoc b/modules/virt-creating-vm-web-page-cli.adoc index 8e5db79514c6..213ea9d6b6c7 100644 --- a/modules/virt-creating-vm-web-page-cli.adoc +++ b/modules/virt-creating-vm-web-page-cli.adoc @@ -97,7 +97,8 @@ $ oc get dv + If the provisioning is successful, the data volume phase is `Succeeded`: + -.Example output +Example output: ++ [source,terminal] ---- NAME PHASE PROGRESS RESTARTS AGE @@ -113,7 +114,8 @@ $ virtctl console + If the VM is running and the serial console is accessible, the output looks as follows: + -.Example output +Example output: ++ [source,terminal] ---- Successfully connected to vm-rhel-9 console. The escape sequence is ^] diff --git a/modules/virt-customizing-storage-profile-default-cloning-strategy.adoc b/modules/virt-customizing-storage-profile-default-cloning-strategy.adoc index 03dc5964e97a..ace07789af72 100644 --- a/modules/virt-customizing-storage-profile-default-cloning-strategy.adoc +++ b/modules/virt-customizing-storage-profile-default-cloning-strategy.adoc @@ -2,7 +2,7 @@ // // * virt/storage/virt-configuring-storage-profile.adoc -:_mod-docs-content-type: PROCEDURE +:_mod-docs-content-type: CONCEPT [id="virt-customizing-storage-profile-default-cloning-strategy_{context}"] = Setting a default cloning strategy by using a storage profile @@ -20,6 +20,7 @@ You can set clone strategies using the CLI without modifying the default `claimP ==== .Example storage profile +==== [source,yaml] ---- apiVersion: cdi.kubevirt.io/v1beta1 @@ -40,3 +41,4 @@ status: <1> Specify the `accessModes`. <2> Specify the `volumeMode`. <3> Specify the default `cloneStrategy`. +==== diff --git a/modules/virt-customizing-storage-profile.adoc b/modules/virt-customizing-storage-profile.adoc index 41641e49cfa0..d754e9eb01e1 100644 --- a/modules/virt-customizing-storage-profile.adoc +++ b/modules/virt-customizing-storage-profile.adoc @@ -35,7 +35,6 @@ $ oc edit storageprofile + . 
Specify the `accessModes` and `volumeMode` values you want to configure for the storage profile. For example: + -.Example storage profile [source,yaml] ---- apiVersion: cdi.kubevirt.io/v1beta1 diff --git a/modules/virt-define-guest-agent-ping-probe.adoc b/modules/virt-define-guest-agent-ping-probe.adoc index 12ac459cf3fe..5836595e176e 100644 --- a/modules/virt-define-guest-agent-ping-probe.adoc +++ b/modules/virt-define-guest-agent-ping-probe.adoc @@ -21,8 +21,6 @@ include::snippets/technology-preview.adoc[] . Include details of the guest agent ping probe in the VM configuration file. For example: + - -.Sample guest agent ping probe [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-define-http-liveness-probe.adoc b/modules/virt-define-http-liveness-probe.adoc index 9ece1d023dff..49a980a648ad 100644 --- a/modules/virt-define-http-liveness-probe.adoc +++ b/modules/virt-define-http-liveness-probe.adoc @@ -15,10 +15,8 @@ Define an HTTP liveness probe by setting the `spec.livenessProbe.httpGet` field .Procedure -. Include details of the HTTP liveness probe in the VM configuration file. +. Include details of the HTTP liveness probe in the VM configuration file. For example: + - -.Sample liveness probe with an HTTP GET test [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-define-http-readiness-probe.adoc b/modules/virt-define-http-readiness-probe.adoc index 008c524ef3ab..177b4db2c543 100644 --- a/modules/virt-define-http-readiness-probe.adoc +++ b/modules/virt-define-http-readiness-probe.adoc @@ -13,10 +13,8 @@ Define an HTTP readiness probe by setting the `spec.readinessProbe.httpGet` fiel * You have installed the {oc-first}. .Procedure -. Include details of the readiness probe in the VM configuration file. +. Include details of the readiness probe in the VM configuration file. For example: + - -.Sample readiness probe with an HTTP GET test [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-define-tcp-readiness-probe.adoc b/modules/virt-define-tcp-readiness-probe.adoc index 97c07208f86d..39e12da4b188 100644 --- a/modules/virt-define-tcp-readiness-probe.adoc +++ b/modules/virt-define-tcp-readiness-probe.adoc @@ -15,10 +15,8 @@ Define a TCP readiness probe by setting the `spec.readinessProbe.tcpSocket` fiel .Procedure -. Include details of the TCP readiness probe in the VM configuration file. +. Include details of the TCP readiness probe in the VM configuration file. For example: + - -.Sample readiness probe with a TCP socket test [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-defining-apps-for-dr.adoc b/modules/virt-defining-apps-for-dr.adoc index 65dd7449e9e8..ea35b428b249 100644 --- a/modules/virt-defining-apps-for-dr.adoc +++ b/modules/virt-defining-apps-for-dr.adoc @@ -15,19 +15,16 @@ An {rh-rhacm}-managed application that includes a VM must be created by using a There are several actions you can take to improve your experience and chance of success when defining an {rh-rhacm}-managed VM. -[discrete] -[id="use-a-pvc-and-populator_{context}"] -=== Use a PVC and populator to define storage for the VM +Use a PVC and populator to define storage for the VM:: ++ Because data volumes create persistent volume claims (PVCs) implicitly, data volumes and VMs with data volume templates do not fit as neatly into the GitOps model. 
-[discrete] -[id="use-import-method_{context}"] -=== Use the import method when choosing a population source for your VM disk +Use the import method when choosing a population source for your VM disk:: ++ Select a {op-system-base} image from the software catalog to use the import method. Red{nbsp}Hat recommends using a specific version of the image rather than a floating tag for consistent results. The KubeVirt community maintains container disks for other operating systems in a Quay repository. -[discrete] -[id="use-pull-node_{context}"] -=== Use `pullMethod: node` +Use `pullMethod: node`:: ++ Use the pod `pullMethod: node` when creating a data volume from a registry source to take advantage of the {product-title} pull secret, which is required to pull container images from the Red{nbsp}Hat registry. [id="best-practices-{rh-rhacm}-discovered-vm_{context}"] @@ -37,9 +34,8 @@ You can configure any VM in the cluster that is not an {rh-rhacm}-managed applic There are several actions you can take to improve your experience and chance of success when defining an {rh-rhacm}-discovered VM. -[discrete] -[id="protect-the-vm_{context}"] -=== Protect the VM when using MTV, the {product-title} web console, or a custom VM +Protect the VM when using MTV, the {product-title} web console, or a custom VM:: ++ Because automatic labeling is not currently available, the application owner must manually label the components of the VM application when using MTV, the {product-title} web console, or a custom VM. After creating the VM, apply a common label to the following resources associated with the VM: `VirtualMachine`, `DataVolume`, `PersistentVolumeClaim`, `Service`, `Route`, `Secret`, `ConfigMap`, `VirtualMachinePreference`, and `VirtualMachineInstancetype`. Do not label virtual machine instances (VMIs) or pods; {VirtProductName} creates and manages these automatically. @@ -49,12 +45,10 @@ After creating the VM, apply a common label to the following resources associate You must apply the common label to everything in the namespace that you want to protect, including objects that you added to the VM that are not listed here. ==== -[discrete] -[id="working-vm-contains_{context}"] -=== Include more than the `VirtualMachine` object in the VM +Include more than the `VirtualMachine` object in the VM:: ++ Working VMs typically also contain data volumes, persistent volume claims (PVCs), services, routes, secrets, `ConfigMap` objects, and `VirtualMachineSnapshot` objects. -[discrete] -[id="part-of-larger-app_{context}"] -=== Include the VM as part of a larger logical application -This includes other pod-based workloads and VMs. \ No newline at end of file +Include the VM as part of a larger logical application:: ++ +This includes other pod-based workloads and VMs. diff --git a/modules/virt-delete-vm-web.adoc b/modules/virt-delete-vm-web.adoc index 9ca4f2f8be27..cfeb528c0b89 100644 --- a/modules/virt-delete-vm-web.adoc +++ b/modules/virt-delete-vm-web.adoc @@ -11,7 +11,7 @@ Deleting a virtual machine (VM) permanently removes it from the cluster. If the VM is delete protected, the *Delete* action is disabled in the VM's *Actions* menu. -.Prequisite +.Prerequisite * To delete the VM, you must first disable the VM's delete protection setting, if enabled. 
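The `pullMethod: node` recommendation in the disaster-recovery best practices above can be illustrated with a minimal `DataVolume` sketch. This is illustrative only: the data volume name, image reference, and storage size are placeholders rather than values from this patch; only the `spec.source.registry.pullMethod: node` field is the point of the example.

[source,yaml]
----
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: example-dv # placeholder data volume name
spec:
  source:
    registry:
      url: "docker://registry.example.com/rhel9-guest-image:latest" # placeholder container disk image
      pullMethod: node # pull on the node so that the cluster pull secret is used
  storage:
    resources:
      requests:
        storage: 30Gi # placeholder size
----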
diff --git a/modules/virt-deleting-virt-cli.adoc b/modules/virt-deleting-virt-cli.adoc index 446f8e9c60ef..6fdc494eb189 100644 --- a/modules/virt-deleting-virt-cli.adoc +++ b/modules/virt-deleting-virt-cli.adoc @@ -51,7 +51,8 @@ $ oc delete namespace openshift-cnv $ oc delete crd --dry-run=client -l operators.coreos.com/kubevirt-hyperconverged.{CNVNamespace} ---- + -.Example output +Example output: ++ ---- customresourcedefinition.apiextensions.k8s.io "cdis.cdi.kubevirt.io" deleted (dry run) customresourcedefinition.apiextensions.k8s.io "hostpathprovisioners.hostpathprovisioner.kubevirt.io" deleted (dry run) diff --git a/modules/virt-deploying-operator-cli.adoc b/modules/virt-deploying-operator-cli.adoc index eff25338a58b..e88b7f82fe1f 100644 --- a/modules/virt-deploying-operator-cli.adoc +++ b/modules/virt-deploying-operator-cli.adoc @@ -50,7 +50,6 @@ $ watch oc get csv -n {CNVNamespace} + The following output displays if deployment was successful: + -.Example output [source,terminal,subs="attributes+"] ---- NAME DISPLAY VERSION REPLACES PHASE diff --git a/modules/virt-disabling-tls-for-registry.adoc b/modules/virt-disabling-tls-for-registry.adoc index 4390e6d5ad23..a16d40401908 100644 --- a/modules/virt-disabling-tls-for-registry.adoc +++ b/modules/virt-disabling-tls-for-registry.adoc @@ -21,9 +21,8 @@ You can disable TLS (transport layer security) for one or more container registr $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- -. Add a list of insecure registries to the `spec.storageImport.insecureRegistries` field. +. Add a list of insecure registries to the `spec.storageImport.insecureRegistries` field. For example: + -.Example `HyperConverged` custom resource [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-discovering-vm-internal-fqdn.adoc b/modules/virt-discovering-vm-internal-fqdn.adoc index 7c94dfc0495a..83640763aaef 100644 --- a/modules/virt-discovering-vm-internal-fqdn.adoc +++ b/modules/virt-discovering-vm-internal-fqdn.adoc @@ -23,7 +23,8 @@ If a headless service exists with a name that matches the subdomain, a unique DN $ oc edit vm ---- + -.Example `VirtualMachine` manifest file +For example: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-dv-annotations.adoc b/modules/virt-dv-annotations.adoc index d3c3d5f444d7..15cab8c552e8 100644 --- a/modules/virt-dv-annotations.adoc +++ b/modules/virt-dv-annotations.adoc @@ -10,6 +10,7 @@ This example shows how you can configure data volume (DV) annotations to control If you want the importer pod to use both the default network from the cluster and the secondary multus network, use the `k8s.v1.cni.cncf.io/networks: ` annotation. .Multus network annotation example +==== [source,yaml] ---- apiVersion: cdi.kubevirt.io/v1beta1 @@ -21,3 +22,4 @@ metadata: # ... ---- <1> Multus network annotation +==== diff --git a/modules/virt-edit-boot-order-web.adoc b/modules/virt-edit-boot-order-web.adoc index 3277f02b3fa1..1130b570910b 100644 --- a/modules/virt-edit-boot-order-web.adoc +++ b/modules/virt-edit-boot-order-web.adoc @@ -25,7 +25,7 @@ Edit the boot order list in the web console. * If you use a screen reader, press the Up Arrow key or Down Arrow key to move the item in the boot order list. Then, press the *Tab* key to drop the item in a location of your choice. . Click *Save*. - ++ [NOTE] ==== If the virtual machine is running, changes to the boot order list will not take effect until you restart the virtual machine. 
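As a companion to the truncated `insecureRegistries` hunk earlier in this patch, the following is a minimal sketch of the relevant part of the `HyperConverged` CR. The registry host names are placeholders; only the `spec.storageImport.insecureRegistries` field name comes from the module text.

[source,yaml]
----
apiVersion: hco.kubevirt.io/v1beta1
kind: HyperConverged
metadata:
  name: kubevirt-hyperconverged
  namespace: openshift-cnv # default namespace; adjust for your cluster
spec:
  storageImport:
    insecureRegistries: # registries that imports are allowed to pull from without TLS
      - "private-registry-example-1:5000" # placeholder registry host
      - "private-registry-example-2:5000" # placeholder registry host
----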
diff --git a/modules/virt-editing-vm-yaml-web.adoc b/modules/virt-editing-vm-yaml-web.adoc index eaea4fa525b9..7b64d6eb8083 100644 --- a/modules/virt-editing-vm-yaml-web.adoc +++ b/modules/virt-editing-vm-yaml-web.adoc @@ -36,6 +36,7 @@ Navigating away from the YAML screen while editing cancels any changes to the co . Edit the file and click *Save*. +.Result A confirmation message shows that the modification has been successful and includes the updated version number for the object. //Ending conditional expressions diff --git a/modules/virt-enabling-descheduler-evictions.adoc b/modules/virt-enabling-descheduler-evictions.adoc index 566044ca0a0b..eb0dbe317794 100644 --- a/modules/virt-enabling-descheduler-evictions.adoc +++ b/modules/virt-enabling-descheduler-evictions.adoc @@ -49,4 +49,5 @@ spec: <2> By default, the descheduler does not evict pods. To evict pods, set `mode` to `Automatic`. <3> Enabling `devEnableEvictionsInBackground` allows evictions to occur in the background, improving stability and mitigating oscillatory behavior during live migrations. -The descheduler is now enabled on the VM. \ No newline at end of file +.Result +The descheduler is now enabled on the VM. diff --git a/modules/virt-enabling-dynamic-key-injection-cli.adoc b/modules/virt-enabling-dynamic-key-injection-cli.adoc index d53c2ab87b30..e93670620ed8 100644 --- a/modules/virt-enabling-dynamic-key-injection-cli.adoc +++ b/modules/virt-enabling-dynamic-key-injection-cli.adoc @@ -22,9 +22,8 @@ The key is added to the VM by the QEMU guest agent, which is installed automatic .Procedure -. Create a manifest file for a `VirtualMachine` object and a `Secret` object: +. Create a manifest file for a `VirtualMachine` object and a `Secret` object. For example: + -.Example manifest [source,yaml] ---- include::snippets/virt-dynamic-key.yaml[] @@ -55,7 +54,8 @@ $ virtctl start vm example-vm -n example-namespace $ oc describe vm example-vm -n example-namespace ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-enabling-usb-host-passthrough.adoc b/modules/virt-enabling-usb-host-passthrough.adoc index 69545891454d..e02dd469591d 100644 --- a/modules/virt-enabling-usb-host-passthrough.adoc +++ b/modules/virt-enabling-usb-host-passthrough.adoc @@ -32,9 +32,7 @@ $ oc edit hyperconverged kubevirt-hyperconverged -n openshift-cnv ---- . Add a USB device to the `permittedHostDevices` stanza, as shown in the following example: - + -.Example YAML snippet [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-enabling-volume-snapshot-boot-source.adoc b/modules/virt-enabling-volume-snapshot-boot-source.adoc index 9f170abd63e1..500a8370554d 100644 --- a/modules/virt-enabling-volume-snapshot-boot-source.adoc +++ b/modules/virt-enabling-volume-snapshot-boot-source.adoc @@ -31,9 +31,8 @@ $ oc edit storageprofile . Review the `dataImportCronSourceFormat` specification of the `StorageProfile` to confirm whether or not the VM is using PVC or volume snapshot by default. -. Edit the storage profile, if needed, by updating the `dataImportCronSourceFormat` specification to `snapshot`. +. Edit the storage profile, if needed, by updating the `dataImportCronSourceFormat` specification to `snapshot`. 
For example: + -.Example storage profile [source,yaml] ---- apiVersion: cdi.kubevirt.io/v1beta1 diff --git a/modules/virt-example-bond-nncp.adoc b/modules/virt-example-bond-nncp.adoc index cf6c2c7a62ed..615c4f6d062d 100644 --- a/modules/virt-example-bond-nncp.adoc +++ b/modules/virt-example-bond-nncp.adoc @@ -13,9 +13,9 @@ to the cluster. ==== {VirtProductName} only supports the following bond modes: -* mode=1 active-backup + -* mode=2 balance-xor + -* mode=4 802.3ad + +* mode=1 active-backup +* mode=2 balance-xor +* mode=4 802.3ad Other bond modes are not supported. ==== diff --git a/modules/virt-example-nmstate-IP-management.adoc b/modules/virt-example-nmstate-IP-management.adoc index 1ab4dfbfec46..93e8362925c6 100644 --- a/modules/virt-example-nmstate-IP-management.adoc +++ b/modules/virt-example-nmstate-IP-management.adoc @@ -138,7 +138,8 @@ The following example shows a default situation that stores DNS values globally: * Configure a static DNS without a network interface. Note that when updating the `/etc/resolv.conf` file on a host node, you do not need to specify an interface, IPv4 or IPv6, in the `NodeNetworkConfigurationPolicy` (NNCP) manifest. + -.Example of a DNS configuration for a network interface that globally stores DNS values +Example of a DNS configuration for a network interface that globally stores DNS values: ++ [source,yaml] ---- apiVersion: nmstate.io/v1 @@ -192,7 +193,8 @@ The following examples show situations that require configuring a network interf * If you want to rank a static DNS name server over a dynamic DNS name server, define the interface that runs either the Dynamic Host Configuration Protocol (DHCP) or the IPv6 Autoconfiguration (`autoconf`) mechanism in the network interface YAML configuration file. + -.Example configuration that adds `192.0.2.1` to DNS name servers retrieved from the DHCPv4 network protocol +Example configuration that adds `192.0.2.1` to DNS name servers retrieved from the DHCPv4 network protocol: ++ [source,yaml] ---- # ... @@ -218,7 +220,8 @@ interfaces: Storing DNS values at the network interface level might cause name resolution issues after you attach the interface to network components, such as an Open vSwitch (OVS) bridge, a Linux bridge, or a bond. ==== + -.Example configuration that stores DNS values at the interface level +Example configuration that stores DNS values at the interface level: ++ [source,yaml] ---- # ... @@ -253,7 +256,8 @@ interfaces: * If you want to set static DNS search domains and dynamic DNS name servers for your network interface, define the dynamic interface that runs either the Dynamic Host Configuration Protocol (DHCP) or the IPv6 Autoconfiguration (`autoconf`) mechanism in the network interface YAML configuration file. + -.Example configuration that sets `example.com` and `example.org` static DNS search domains along with dynamic DNS name server settings +Example configuration that sets `example.com` and `example.org` static DNS search domains along with dynamic DNS name server settings: ++ [source,yaml] ---- # ... diff --git a/modules/virt-example-vm-node-placement-node-affinity.adoc b/modules/virt-example-vm-node-placement-node-affinity.adoc index 91ef201e2df7..6983394879a8 100644 --- a/modules/virt-example-vm-node-placement-node-affinity.adoc +++ b/modules/virt-example-vm-node-placement-node-affinity.adoc @@ -11,6 +11,7 @@ In this example, the VM must be scheduled on a node that has the label `example. 
If possible, the scheduler avoids nodes that have the label `example-node-label-key = example-node-label-value`. However, if all candidate nodes have this label, the scheduler ignores this constraint. .Example VM manifest +==== [source,yaml] ---- metadata: @@ -42,3 +43,4 @@ spec: ---- <1> If you use the `requiredDuringSchedulingIgnoredDuringExecution` rule type, the VM is not scheduled if the constraint is not met. <2> If you use the `preferredDuringSchedulingIgnoredDuringExecution` rule type, the VM is still scheduled if the constraint is not met, as long as all required constraints are met. +==== diff --git a/modules/virt-example-vm-node-placement-node-selector.adoc b/modules/virt-example-vm-node-placement-node-selector.adoc index aca177ca30f5..9b07da0eaad3 100644 --- a/modules/virt-example-vm-node-placement-node-selector.adoc +++ b/modules/virt-example-vm-node-placement-node-selector.adoc @@ -14,6 +14,7 @@ If there are no nodes that fit this description, the virtual machine is not sche ==== .Example VM manifest +==== [source,yaml] ---- metadata: @@ -28,3 +29,4 @@ spec: example-key-2: example-value-2 # ... ---- +==== diff --git a/modules/virt-example-vm-node-placement-pod-affinity.adoc b/modules/virt-example-vm-node-placement-pod-affinity.adoc index 911552cb5491..9573b3a53eff 100644 --- a/modules/virt-example-vm-node-placement-pod-affinity.adoc +++ b/modules/virt-example-vm-node-placement-pod-affinity.adoc @@ -11,6 +11,7 @@ In this example, the VM must be scheduled on a node that has a running pod with If possible, the VM is not scheduled on a node that has any pod with the label `example-key-2 = example-value-2`. However, if all candidate nodes have a pod with this label, the scheduler ignores this constraint. .Example VM manifest +==== [source,yaml] ---- metadata: @@ -45,3 +46,4 @@ spec: ---- <1> If you use the `requiredDuringSchedulingIgnoredDuringExecution` rule type, the VM is not scheduled if the constraint is not met. <2> If you use the `preferredDuringSchedulingIgnoredDuringExecution` rule type, the VM is still scheduled if the constraint is not met, as long as all required constraints are met. +==== diff --git a/modules/virt-example-vm-node-placement-tolerations.adoc b/modules/virt-example-vm-node-placement-tolerations.adoc index 717c099f31eb..ac818cf3522d 100644 --- a/modules/virt-example-vm-node-placement-tolerations.adoc +++ b/modules/virt-example-vm-node-placement-tolerations.adoc @@ -14,6 +14,7 @@ A virtual machine that tolerates a taint is not required to schedule onto a node ==== .Example VM manifest +==== [source,yaml] ---- metadata: @@ -28,3 +29,4 @@ spec: effect: "NoSchedule" # ... ---- +==== diff --git a/modules/virt-expanding-storage-with-data-volumes.adoc b/modules/virt-expanding-storage-with-data-volumes.adoc index bde629b2519e..a7fec08d8299 100644 --- a/modules/virt-expanding-storage-with-data-volumes.adoc +++ b/modules/virt-expanding-storage-with-data-volumes.adoc @@ -17,7 +17,6 @@ You can expand the available storage of a virtual machine (VM) by adding blank d . 
Create a `DataVolume` manifest as shown in the following example: + -.Example `DataVolume` manifest [source,yaml] ---- apiVersion: cdi.kubevirt.io/v1beta1 diff --git a/modules/virt-exposing-pci-device-in-cluster-cli.adoc b/modules/virt-exposing-pci-device-in-cluster-cli.adoc index a41c35d0ebe6..735191f584fd 100644 --- a/modules/virt-exposing-pci-device-in-cluster-cli.adoc +++ b/modules/virt-exposing-pci-device-in-cluster-cli.adoc @@ -22,7 +22,6 @@ $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} . Add the PCI device information to the `spec.permittedHostDevices.pciHostDevices` array. For example: + -.Example configuration file [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1 @@ -63,7 +62,8 @@ The above example snippet shows two PCI host devices that are named `nvidia.com/ $ oc describe node ---- + -.Example output +Example output: ++ [source,terminal] ---- Capacity: diff --git a/modules/virt-generalizing-linux-vm-image.adoc b/modules/virt-generalizing-linux-vm-image.adoc index 312f013fb0cf..36448c4b38d5 100644 --- a/modules/virt-generalizing-linux-vm-image.adoc +++ b/modules/virt-generalizing-linux-vm-image.adoc @@ -34,7 +34,8 @@ $ virtctl stop $ oc get vm -o jsonpath="{.spec.template.spec.volumes}{'\n'}" ---- + -.Example output +Example output: ++ [source,terminal] ---- [{"dataVolume":{"name":""},"name":"rootdisk"},{"cloudInitNoCloud":{...}] @@ -47,7 +48,8 @@ $ oc get vm -o jsonpath="{.spec.template.spec.volumes}{'\n'}" $ oc get pvc ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE @@ -97,4 +99,6 @@ $ virt-sysprep -a disk.img .. Click *Save*. +.Result + The new volume appears in the *Select volume to boot from* list. This is your new golden image. You can use this volume to create new VMs. diff --git a/modules/virt-generalizing-windows-sysprep.adoc b/modules/virt-generalizing-windows-sysprep.adoc index 5d84cc310ae9..a8be7daf3e73 100644 --- a/modules/virt-generalizing-windows-sysprep.adoc +++ b/modules/virt-generalizing-windows-sysprep.adoc @@ -31,4 +31,6 @@ Before generalizing the VM, you must ensure the `sysprep` tool cannot detect an ---- . After the `sysprep` tool completes, the Windows VM shuts down. The disk image of the VM is now available to use as an installation image for Windows VMs. +.Result + You can now specialize the VM. diff --git a/modules/virt-golden-images-namespace-cli.adoc b/modules/virt-golden-images-namespace-cli.adoc index 24d979f69701..e6ff319f2ff9 100644 --- a/modules/virt-golden-images-namespace-cli.adoc +++ b/modules/virt-golden-images-namespace-cli.adoc @@ -23,9 +23,8 @@ You can configure a custom namespace for golden images in your cluster by settin $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- -. Configure the custom namespace by updating the value of the `spec.commonBootImageNamespace` field: +. Configure the custom namespace by updating the value of the `spec.commonBootImageNamespace` field. For example: + -.Example configuration file [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1 @@ -39,4 +38,4 @@ spec: ---- <1> The namespace to use for golden images. -. Save your changes and exit the editor. \ No newline at end of file +. Save your changes and exit the editor. 
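For context on the `spec.commonBootImageNamespace` change directly above, this is a minimal sketch of where the field sits in the `HyperConverged` CR. The namespace value is a placeholder, and the manifest is illustrative rather than copied from the module.

[source,yaml]
----
apiVersion: hco.kubevirt.io/v1beta1
kind: HyperConverged
metadata:
  name: kubevirt-hyperconverged
  namespace: openshift-cnv
spec:
  commonBootImageNamespace: custom-golden-images # placeholder custom namespace for golden images
----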
diff --git a/modules/virt-hot-plugging-bridge-network-interface-cli.adoc b/modules/virt-hot-plugging-bridge-network-interface-cli.adoc index 8e00a4c290fb..e8e3444077a2 100644 --- a/modules/virt-hot-plugging-bridge-network-interface-cli.adoc +++ b/modules/virt-hot-plugging-bridge-network-interface-cli.adoc @@ -24,14 +24,15 @@ Hot plug a secondary network interface to a virtual machine (VM) while the VM is $ virtctl start -n ---- -. Use the following command to add the new network interface to the running VM. Editing the VM specification adds the new network interface to the VM and virtual machine instance (VMI) configuration but does not attach it to the running VM. +. Use the following command to add the new network interface to the running VM. Editing the VM specification adds the new network interface to the VM and virtual machine instance (VMI) configuration but does not attach it to the running VM: + [source,terminal] ---- $ oc edit vm ---- + -.Example VM configuration +For example: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 @@ -78,7 +79,8 @@ $ virtctl migrate $ oc get VirtualMachineInstanceMigration -w ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME PHASE VMI @@ -97,7 +99,8 @@ kubevirt-migrate-vm-lj62q Succeeded vm-fedora $ oc get vmi vm-fedora -ojsonpath="{ @.status.interfaces }" ---- + -.Example output +Example output: ++ [source,json] ---- [ @@ -122,4 +125,4 @@ $ oc get vmi vm-fedora -ojsonpath="{ @.status.interfaces }" } ] ---- -<1> The hot plugged interface appears in the VMI status. \ No newline at end of file +<1> The hot plugged interface appears in the VMI status. diff --git a/modules/virt-hot-plugging-memory.adoc b/modules/virt-hot-plugging-memory.adoc index aa198f880660..8163e18d0424 100644 --- a/modules/virt-hot-plugging-memory.adoc +++ b/modules/virt-hot-plugging-memory.adoc @@ -16,9 +16,11 @@ You can add or remove the amount of memory allocated to a virtual machine (VM) w . On the *Configuration* tab, click *Edit CPU|Memory*. . Enter the desired amount of memory and click *Save*. +.Result + The system applies these changes immediately. If the VM is migratable, a live migration is triggered. If not, or if the changes cannot be live-updated, a `RestartRequired` condition is added to the VM. [NOTE] ==== Linux guests require a kernel version of 5.16 or later and Windows guests require the latest `viomem` drivers. -==== \ No newline at end of file +==== diff --git a/modules/virt-hot-unplugging-bridge-network-interface-cli.adoc b/modules/virt-hot-unplugging-bridge-network-interface-cli.adoc index b48b9d9bca3d..1b4dc912bc06 100644 --- a/modules/virt-hot-unplugging-bridge-network-interface-cli.adoc +++ b/modules/virt-hot-unplugging-bridge-network-interface-cli.adoc @@ -23,14 +23,15 @@ Hot unplugging is not supported for Single Root I/O Virtualization (SR-IOV) inte .Procedure -. Edit the VM specification to hot unplug a secondary network interface. Setting the interface state to `absent` detaches the network interface from the guest, but the interface still exists in the pod. +. Edit the VM specification to hot unplug a secondary network interface. 
Setting the interface state to `absent` detaches the network interface from the guest, but the interface still exists in the pod: + [source,terminal] ---- $ oc edit vm ---- + -.Example VM configuration +For example: ++ [source,yaml] ---- apiVersion: kubevirt.io/v1 diff --git a/modules/virt-initiating-vm-migration-cli.adoc b/modules/virt-initiating-vm-migration-cli.adoc index 64d8f7ce0782..651f9e684053 100644 --- a/modules/virt-initiating-vm-migration-cli.adoc +++ b/modules/virt-initiating-vm-migration-cli.adoc @@ -45,7 +45,8 @@ The `VirtualMachineInstanceMigration` object triggers a live migration of the VM $ oc describe vmi -n ---- + -.Example output +Example output: ++ [source,yaml] ---- # ... diff --git a/modules/virt-latency-checkup-web-console.adoc b/modules/virt-latency-checkup-web-console.adoc index 74b6880514b1..852449c169de 100644 --- a/modules/virt-latency-checkup-web-console.adoc +++ b/modules/virt-latency-checkup-web-console.adoc @@ -25,4 +25,6 @@ Run a latency checkup to verify network connectivity and measure the latency bet . Optional: Target specific nodes by enabling *Select nodes* and specifying the *Source node* and *Target node*. . Click *Run*. -You can view the status of the latency checkup in the *Checkups* list on the *Latency checkup* tab. Click on the name of the checkup for more details. \ No newline at end of file +.Result + +You can view the status of the latency checkup in the *Checkups* list on the *Latency checkup* tab. Click on the name of the checkup for more details. diff --git a/modules/virt-loki-log-queries.adoc b/modules/virt-loki-log-queries.adoc index 1b6a008e62ec..a24c46853e0c 100644 --- a/modules/virt-loki-log-queries.adoc +++ b/modules/virt-loki-log-queries.adoc @@ -2,7 +2,7 @@ // // * virt/support/virt-troubleshooting.adoc -:_mod-docs-content-type: reference +:_mod-docs-content-type: REFERENCE [id="virt-loki-log-queries_{context}"] = {VirtProductName} LogQL queries @@ -111,9 +111,11 @@ You can filter log lines to include or exclude strings or regular expressions by |==== .Example line filter expression +==== [source,text] ---- {log_type=~".+"}|json |kubernetes_labels_app_kubernetes_io_part_of="hyperconverged-cluster" |= "error" != "timeout" ----- \ No newline at end of file +---- +==== diff --git a/modules/virt-measuring-latency-vm-secondary-network.adoc b/modules/virt-measuring-latency-vm-secondary-network.adoc index cdebda11dfaf..d7ef096bdfb2 100644 --- a/modules/virt-measuring-latency-vm-secondary-network.adoc +++ b/modules/virt-measuring-latency-vm-secondary-network.adoc @@ -25,11 +25,9 @@ You run a latency checkup by performing the following steps: .Procedure -. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest for the latency checkup: +. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest for the latency checkup. For example: + -.Example role manifest file [%collapsible] -==== [source,yaml] ---- --- @@ -86,7 +84,6 @@ roleRef: name: kiagnose-configmap-access apiGroup: rbac.authorization.k8s.io ---- -==== . Apply the `ServiceAccount`, `Role`, and `RoleBinding` manifest: + @@ -96,9 +93,8 @@ $ oc apply -n -f .yaml <1> ---- <1> `` is the namespace where the checkup is to be run. This must be an existing namespace where the `NetworkAttachmentDefinition` object resides. -. Create a `ConfigMap` manifest that contains the input parameters for the checkup: +. Create a `ConfigMap` manifest that contains the input parameters for the checkup. 
For example: + -.Example input config map [source,yaml] ---- apiVersion: v1 @@ -129,9 +125,8 @@ data: $ oc apply -n -f .yaml ---- -. Create a `Job` manifest to run the checkup: +. Create a `Job` manifest to run the checkup. For example: + -.Example job manifest [source,yaml,subs="attributes+"] ---- apiVersion: batch/v1 @@ -188,7 +183,8 @@ $ oc wait job kubevirt-vm-latency-checkup -n --for condition= $ oc get configmap kubevirt-vm-latency-checkup-config -n -o yaml ---- + -.Example output config map (success) +Example output config map (success): ++ [source,yaml] ---- apiVersion: v1 diff --git a/modules/virt-metro-dr-odf.adoc b/modules/virt-metro-dr-odf.adoc index 1276c6dce20f..bf080bb7ee02 100644 --- a/modules/virt-metro-dr-odf.adoc +++ b/modules/virt-metro-dr-odf.adoc @@ -8,7 +8,8 @@ {VirtProductName} supports the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/latest/html-single/configuring_openshift_data_foundation_disaster_recovery_for_openshift_workloads/index#metro-dr-solution[Metro-DR solution for {rh-storage}], which provides two-way synchronous data replication between managed {VirtProductName} clusters installed on primary and secondary sites. -.Metro-DR differences +Metro-DR differences: + * This synchronous solution is only available to metropolitan distance data centers with a network round-trip latency of 10 milliseconds or less. * Multiple disk VMs are supported. * To prevent data corruption, you must ensure that storage is fenced during failover. @@ -18,4 +19,4 @@ Fencing means isolating a node so that workloads do not run on it. ==== -For more information about using the Metro-DR solution for {rh-storage} with {VirtProductName}, see {ibm-title}'s {rh-storage} Metro-DR documentation. \ No newline at end of file +For more information about using the Metro-DR solution for {rh-storage} with {VirtProductName}, see {ibm-title}'s {rh-storage} Metro-DR documentation. diff --git a/modules/virt-monitoring-upgrade-status.adoc b/modules/virt-monitoring-upgrade-status.adoc index fa9ed0eafb1e..548a451a5ef9 100644 --- a/modules/virt-monitoring-upgrade-status.adoc +++ b/modules/virt-monitoring-upgrade-status.adoc @@ -30,7 +30,6 @@ $ oc get csv -n {CNVNamespace} . Review the output, checking the `PHASE` field. For example: + -.Example output [source,terminal,subs="attributes+"] ---- VERSION REPLACES PHASE @@ -49,7 +48,6 @@ $ oc get hyperconverged kubevirt-hyperconverged -n {CNVNamespace} \ + A successful upgrade results in the following output: + -.Example output [source,terminal] ---- ReconcileComplete True Reconcile completed successfully diff --git a/modules/virt-node-network-config-console.adoc b/modules/virt-node-network-config-console.adoc index a36f599fedfc..ce7ea505d157 100644 --- a/modules/virt-node-network-config-console.adoc +++ b/modules/virt-node-network-config-console.adoc @@ -1,5 +1,6 @@ :_mod-docs-content-type: CONCEPT [id="virt-node-network-config-console_{context}"] = Managing policy from the web console + You can update the node network configuration, such as adding or removing interfaces from nodes, by applying `NodeNetworkConfigurationPolicy` manifests to the cluster. -Manage the policy from the web console by accessing the list of created policies in the *NodeNetworkConfigurationPolicy* page under the *Networking* menu. This page enables you to create, update, monitor, and delete the policies. 
\ No newline at end of file +Manage the policy from the web console by accessing the list of created policies in the *NodeNetworkConfigurationPolicy* page under the *Networking* menu. This page enables you to create, update, monitor, and delete the policies. diff --git a/modules/virt-optimizing-clone-performance-at-scale-in-openshift-data-foundation.adoc b/modules/virt-optimizing-clone-performance-at-scale-in-openshift-data-foundation.adoc index 65b190c0c5fd..9547730a6dbc 100644 --- a/modules/virt-optimizing-clone-performance-at-scale-in-openshift-data-foundation.adoc +++ b/modules/virt-optimizing-clone-performance-at-scale-in-openshift-data-foundation.adoc @@ -11,7 +11,9 @@ When you use {rh-storage}, the storage profile configures the default cloning st To improve performance when creating hundreds of clones from a single source PVC, use the `VolumeSnapshot` cloning method instead of the default `csi-clone` strategy. .Procedure -Create a `VolumeSnapshot` custom resource (CR) of the source image by using the following content: + +. Create a `VolumeSnapshot` custom resource (CR) of the source image by using the following content: ++ [source,yaml] ---- apiVersion: snapshot.storage.k8s.io/v1 @@ -26,7 +28,7 @@ spec: ---- . Add the `spec.source.snapshot` stanza to reference the `VolumeSnapshot` as the source for the `DataVolume clone`: - ++ [source,yaml] ---- spec: @@ -34,4 +36,4 @@ spec: snapshot: namespace: golden-ns name: golden-volumesnapshot ----- \ No newline at end of file +---- diff --git a/modules/virt-options-configuring-mdevs.adoc b/modules/virt-options-configuring-mdevs.adoc index b419b79906f9..df422a7fb459 100644 --- a/modules/virt-options-configuring-mdevs.adoc +++ b/modules/virt-options-configuring-mdevs.adoc @@ -24,7 +24,6 @@ Setting this feature gate as described in the link:https://docs.nvidia.com/datac ==== * You must configure your `ClusterPolicy` manifest so that it matches the following example: + -.Example manifest [source,yaml] ---- kind: ClusterPolicy diff --git a/modules/virt-preventing-nvidia-gpu-operands-from-deploying-on-nodes.adoc b/modules/virt-preventing-nvidia-gpu-operands-from-deploying-on-nodes.adoc index a2429f2f5d3b..ebea05ec6d51 100644 --- a/modules/virt-preventing-nvidia-gpu-operands-from-deploying-on-nodes.adoc +++ b/modules/virt-preventing-nvidia-gpu-operands-from-deploying-on-nodes.adoc @@ -50,8 +50,8 @@ $ oc describe node $ oc get pods -n nvidia-gpu-operator ---- + -.Example output - +Example output: ++ [source,terminal] ---- NAME READY STATUS RESTARTS AGE @@ -71,8 +71,8 @@ nvidia-vfio-manager-zqtck 1/1 Terminating 0 9d $ oc get pods -n nvidia-gpu-operator ---- + -.Example output - +Example output: ++ [source,terminal] ---- NAME READY STATUS RESTARTS AGE diff --git a/modules/virt-preventing-workload-updates-during-control-plane-only-update.adoc b/modules/virt-preventing-workload-updates-during-control-plane-only-update.adoc index a37bd0467ae4..ba6be324a32a 100644 --- a/modules/virt-preventing-workload-updates-during-control-plane-only-update.adoc +++ b/modules/virt-preventing-workload-updates-during-control-plane-only-update.adoc @@ -59,7 +59,8 @@ $ oc patch hyperconverged kubevirt-hyperconverged -n {CNVNamespace} \ --type json -p '[{"op":"replace","path":"/spec/workloadUpdateStrategy/workloadUpdateMethods", "value":[]}]' ---- + -.Example output +Example output: ++ [source,terminal] ---- hyperconverged.hco.kubevirt.io/kubevirt-hyperconverged patched @@ -72,9 +73,9 @@ hyperconverged.hco.kubevirt.io/kubevirt-hyperconverged patched $ oc get hyperconverged 
kubevirt-hyperconverged -n {CNVNamespace} -o json | jq ".status.conditions" ---- + -.Example output +Example output: ++ [%collapsible] -==== [source,json] ---- [ @@ -120,7 +121,6 @@ $ oc get hyperconverged kubevirt-hyperconverged -n {CNVNamespace} -o json | jq " } ] ---- -==== <1> The {VirtProductName} Operator has the `Upgradeable` status. . Manually update your cluster from the source EUS version to the next minor version of {product-title}: @@ -131,8 +131,7 @@ $ oc get hyperconverged kubevirt-hyperconverged -n {CNVNamespace} -o json | jq " $ oc adm upgrade ---- + -.Verification -* Check the current version by running the following command: +Check the current version by running the following command: + [source,terminal] ---- @@ -164,7 +163,8 @@ $ oc get csv -n {CNVNamespace} $ oc get hyperconverged kubevirt-hyperconverged -n {CNVNamespace} -o json | jq ".status.versions" ---- + -.Example output +Example output: ++ [source,terminal,subs="attributes+"] ---- [ @@ -212,15 +212,14 @@ $ oc patch hyperconverged kubevirt-hyperconverged -n {CNVNamespace} --type json "[{\"op\":\"add\",\"path\":\"/spec/workloadUpdateStrategy/workloadUpdateMethods\", \"value\":{WorkloadUpdateMethodConfig}}]" ---- + -.Example output +Example output: ++ [source,terminal] ---- hyperconverged.hco.kubevirt.io/kubevirt-hyperconverged patched ---- + -.Verification - -* Check the status of VM migration by running the following command: +Check the status of VM migration by running the following command: + [source,terminal] ---- diff --git a/modules/virt-pxe-booting-with-mac-address.adoc b/modules/virt-pxe-booting-with-mac-address.adoc index 41d965bd5daa..6ca30c1bda7c 100644 --- a/modules/virt-pxe-booting-with-mac-address.adoc +++ b/modules/virt-pxe-booting-with-mac-address.adoc @@ -116,7 +116,8 @@ networks: $ oc create -f vmi-pxe-boot.yaml ---- + -.Example output +Example output: ++ [source,terminal] ---- virtualmachineinstance.kubevirt.io "vmi-pxe-boot" created @@ -156,7 +157,8 @@ In this case, we used `eth1` for the PXE boot, without an IP address. The other $ ip addr ---- + -.Example output +Example output: ++ [source,terminal] ---- ... diff --git a/modules/virt-querying-metrics.adoc b/modules/virt-querying-metrics.adoc index 718e60c12557..3101daeba841 100644 --- a/modules/virt-querying-metrics.adoc +++ b/modules/virt-querying-metrics.adoc @@ -32,7 +32,8 @@ A value above '0' means that the vCPU wants to run, but the host scheduler canno To query the vCPU metric, the `schedstats=enable` kernel argument must first be applied to the `MachineConfig` object. This kernel argument enables scheduler statistics used for debugging and performance tuning and adds a minor additional load to the scheduler. ==== -.Example vCPU wait time query +Example vCPU wait time query: + [source,promql] ---- topk(3, sum by (name, namespace) (rate(kubevirt_vmi_vcpu_wait_seconds_total[6m]))) > 0 <1> @@ -51,7 +52,8 @@ Returns the total amount of traffic received (in bytes) on the virtual machine's `kubevirt_vmi_network_transmit_bytes_total`:: Returns the total amount of traffic transmitted (in bytes) on the virtual machine's network. Type: Counter. 
-.Example network traffic query +Example network traffic query: + [source,promql] ---- topk(3, sum by (name, namespace) (rate(kubevirt_vmi_network_receive_bytes_total[6m])) + sum by (name, namespace) (rate(kubevirt_vmi_network_transmit_bytes_total[6m]))) > 0 <1> @@ -61,9 +63,9 @@ topk(3, sum by (name, namespace) (rate(kubevirt_vmi_network_receive_bytes_total[ [id="virt-promql-storage-metrics_{context}"] == Storage metrics -[id="virt-storage-traffic_{context}"] -=== Storage-related traffic - +Storage-related traffic:: ++ +-- The following queries can identify VMs that are writing large amounts of data: `kubevirt_vmi_storage_read_traffic_bytes_total`:: @@ -72,23 +74,26 @@ Returns the total amount (in bytes) of the virtual machine's storage-related tra `kubevirt_vmi_storage_write_traffic_bytes_total`:: Returns the total amount of storage writes (in bytes) of the virtual machine's storage-related traffic. Type: Counter. -.Example storage-related traffic query +Example storage-related traffic query: + [source,promql] ---- topk(3, sum by (name, namespace) (rate(kubevirt_vmi_storage_read_traffic_bytes_total[6m])) + sum by (name, namespace) (rate(kubevirt_vmi_storage_write_traffic_bytes_total[6m]))) > 0 <1> ---- <1> This query returns the top 3 VMs performing the most storage traffic at every given moment over a six-minute time period. +-- -[id="virt-storage-snapshot-data_{context}"] -=== Storage snapshot data - +Storage snapshot data:: ++ +-- `kubevirt_vmsnapshot_disks_restored_from_source`:: Returns the total number of virtual machine disks restored from the source virtual machine. Type: Gauge. `kubevirt_vmsnapshot_disks_restored_from_source_bytes`:: Returns the amount of space in bytes restored from the source virtual machine. Type: Gauge. -.Examples of storage snapshot data queries +Examples of storage snapshot data queries: + [source,promql] ---- kubevirt_vmsnapshot_disks_restored_from_source{vm_name="simple-vm", vm_namespace="default"} <1> @@ -100,10 +105,11 @@ kubevirt_vmsnapshot_disks_restored_from_source{vm_name="simple-vm", vm_namespace kubevirt_vmsnapshot_disks_restored_from_source_bytes{vm_name="simple-vm", vm_namespace="default"} <1> ---- <1> This query returns the amount of space in bytes restored from the source virtual machine. +-- -[id="virt-iops_{context}"] -=== I/O performance - +I/O performance:: ++ +-- The following queries can determine the I/O performance of storage devices: `kubevirt_vmi_storage_iops_read_total`:: @@ -112,12 +118,14 @@ Returns the amount of write I/O operations the virtual machine is performing per `kubevirt_vmi_storage_iops_write_total`:: Returns the amount of read I/O operations the virtual machine is performing per second. Type: Counter. -.Example I/O performance query +Example I/O performance query: + [source,promql] ---- topk(3, sum by (name, namespace) (rate(kubevirt_vmi_storage_iops_read_total[6m])) + sum by (name, namespace) (rate(kubevirt_vmi_storage_iops_write_total[6m]))) > 0 <1> ---- <1> This query returns the top 3 VMs performing the most I/O operations per second at every given moment over a six-minute time period. +-- [id="virt-promql-guest-memory-metrics_{context}"] == Guest memory swapping metrics @@ -130,7 +138,8 @@ Returns the total amount (in bytes) of memory the virtual guest is swapping in. `kubevirt_vmi_memory_swap_out_traffic_bytes`:: Returns the total amount (in bytes) of memory the virtual guest is swapping out. Type: Gauge. 
-.Example memory swapping query +Example memory swapping query: + [source,promql] ---- topk(3, sum by (name, namespace) (rate(kubevirt_vmi_memory_swap_in_traffic_bytes[6m])) + sum by (name, namespace) (rate(kubevirt_vmi_memory_swap_out_traffic_bytes[6m]))) > 0 <1> diff --git a/modules/virt-querying-the-node-exporter-service-for-metrics.adoc b/modules/virt-querying-the-node-exporter-service-for-metrics.adoc index a9bf45806af1..31234ae838f5 100644 --- a/modules/virt-querying-the-node-exporter-service-for-metrics.adoc +++ b/modules/virt-querying-the-node-exporter-service-for-metrics.adoc @@ -28,7 +28,8 @@ $ oc get service -n $ curl http://<172.30.226.162:9100>/metrics | grep -vE "^#|^$" ---- + -.Example output +Example output: ++ [source,terminal] ---- node_arp_entries{device="eth0"} 1 diff --git a/modules/virt-regional-dr-odf.adoc b/modules/virt-regional-dr-odf.adoc index 96570ced3944..26504fcabb82 100644 --- a/modules/virt-regional-dr-odf.adoc +++ b/modules/virt-regional-dr-odf.adoc @@ -8,9 +8,10 @@ {VirtProductName} supports the link:https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/latest/html-single/configuring_openshift_data_foundation_disaster_recovery_for_openshift_workloads/index#rdr-solution[Regional-DR solution for {rh-storage}], which provides asynchronous data replication at regular intervals between managed {VirtProductName} clusters installed on primary and secondary sites. -.Regional-DR differences +Regional-DR differences: + * Regional-DR supports higher network latency between the primary and secondary sites. * Regional-DR uses RBD snapshots to replicate data asynchronously. Currently, your applications must be resilient to small variances between VM disks. You can prevent these variances by using single disk VMs. * Using the import method when selecting a population source for your VM disk is recommended. However, you can protect VMs that use cloned PVCs if you select a `VolumeReplicationClass` that enables image flattening. For more information, see the {rh-storage} documentation. -For more information about using the Regional-DR solution for {rh-storage} with {VirtProductName}, see {ibm-title}'s {rh-storage} Regional-DR documentation. \ No newline at end of file +For more information about using the Regional-DR solution for {rh-storage} with {VirtProductName}, see {ibm-title}'s {rh-storage} Regional-DR documentation. diff --git a/modules/virt-remove-boot-order-item-web.adoc b/modules/virt-remove-boot-order-item-web.adoc index 0f7aab2f7b6f..f60a458e64a4 100644 --- a/modules/virt-remove-boot-order-item-web.adoc +++ b/modules/virt-remove-boot-order-item-web.adoc @@ -21,7 +21,7 @@ Remove items from a boot order list by using the web console. . Click the pencil icon that is located on the right side of *Boot Order*. . Click the *Remove* icon {delete} next to the item. The item is removed from the boot order list and saved in the list of available boot sources. If you remove all items from the boot order list, the following message displays: *No resource selected. VM will attempt to boot from disks by order of appearance in YAML file.* - ++ [NOTE] ==== If the virtual machine is running, changes to *Boot Order* will not take effect until you restart the virtual machine. 
diff --git a/modules/virt-removing-mediated-device-from-cluster-cli.adoc b/modules/virt-removing-mediated-device-from-cluster-cli.adoc index fbc7fd90287e..1798b9b3bfb9 100644 --- a/modules/virt-removing-mediated-device-from-cluster-cli.adoc +++ b/modules/virt-removing-mediated-device-from-cluster-cli.adoc @@ -23,7 +23,6 @@ $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} . Remove the device information from the `spec.mediatedDevicesConfiguration` and `spec.permittedHostDevices` stanzas of the `HyperConverged` CR. Removing both entries ensures that you can later create a new mediated device type on the same node. For example: + -.Example configuration file [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1 diff --git a/modules/virt-removing-pci-device-from-cluster-cli.adoc b/modules/virt-removing-pci-device-from-cluster-cli.adoc index ad7776d43f77..ffabaea5fe1e 100644 --- a/modules/virt-removing-pci-device-from-cluster-cli.adoc +++ b/modules/virt-removing-pci-device-from-cluster-cli.adoc @@ -19,9 +19,8 @@ To remove a PCI host device from the cluster, delete the information for that de $ oc edit hyperconverged kubevirt-hyperconverged -n {CNVNamespace} ---- -. Remove the PCI device information from the `spec.permittedHostDevices.pciHostDevices` array by deleting the `pciDeviceSelector`, `resourceName` and `externalResourceProvider` (if applicable) fields for the appropriate device. In this example, the `intel.com/qat` resource has been deleted. +. Remove the PCI device information from the `spec.permittedHostDevices.pciHostDevices` array by deleting the `pciDeviceSelector`, `resourceName` and `externalResourceProvider` (if applicable) fields for the appropriate device. In this example, the `intel.com/qat` resource has been deleted: + -.Example configuration file [source,yaml,subs="attributes+"] ---- apiVersion: hco.kubevirt.io/v1 @@ -49,7 +48,8 @@ spec: $ oc describe node ---- + -.Example output +Example output: ++ [source,terminal] ---- Capacity: diff --git a/modules/virt-removing-vm-delete-protection.adoc b/modules/virt-removing-vm-delete-protection.adoc index be61bf7e2fe6..effa975cb242 100644 --- a/modules/virt-removing-vm-delete-protection.adoc +++ b/modules/virt-removing-vm-delete-protection.adoc @@ -22,7 +22,6 @@ You can remove the delete protection option by establishing a validation admissi . Create the validation admission policy, as shown in the following example: + -.Example validation admission policy file [source,yaml] ---- apiVersion: admissionregistration.k8s.io/v1 @@ -58,7 +57,6 @@ $ oc apply -f disable-vm-delete-protection.yaml . 
Create the validation admission policy binding, as shown in the following example: + -.Example validation admission policy binding file [source,yaml] ---- apiVersion: admissionregistration.k8s.io/v1 diff --git a/modules/virt-restoring-vm-from-snapshot-cli.adoc b/modules/virt-restoring-vm-from-snapshot-cli.adoc index 784f62baaf8b..bab9ccb1c85f 100644 --- a/modules/virt-restoring-vm-from-snapshot-cli.adoc +++ b/modules/virt-restoring-vm-from-snapshot-cli.adoc @@ -56,7 +56,8 @@ The snapshot controller updates the status fields of the `VirtualMachineRestore` $ oc get vmrestore ---- + -.Example output +Example output: ++ [source, yaml] ---- apiVersion: snapshot.kubevirt.io/v1beta1 diff --git a/modules/virt-running-real-time-checkup.adoc b/modules/virt-running-real-time-checkup.adoc index 2bdf135a1c33..30a72574b738 100644 --- a/modules/virt-running-real-time-checkup.adoc +++ b/modules/virt-running-real-time-checkup.adoc @@ -15,11 +15,9 @@ Use a predefined checkup to verify that your {product-title} cluster can run vir .Procedure -. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest file for the real-time checkup: +. Create a `ServiceAccount`, `Role`, and `RoleBinding` manifest file for the real-time checkup. For example: + -.Example service account, role, and rolebinding manifest file [%collapsible] -==== [source,yaml] ---- --- @@ -76,7 +74,6 @@ roleRef: kind: Role name: kubevirt-realtime-checker ---- -==== . Apply the `ServiceAccount`, `Role`, and `RoleBinding` manifest to the target namespace: + @@ -85,9 +82,8 @@ roleRef: $ oc apply -n -f .yaml ---- -. Create a `ConfigMap` manifest file that contains the input parameters for the checkup: +. Create a `ConfigMap` manifest file that contains the input parameters for the checkup. For example: + -.Example input config map [source,yaml] ---- apiVersion: v1 @@ -112,9 +108,8 @@ data: $ oc apply -n -f .yaml ---- -. Create a `Job` manifest to run the checkup: +. Create a `Job` manifest to run the checkup. For example: + -.Example job manifest [source,yaml,subs="attributes+"] ---- apiVersion: batch/v1 @@ -172,7 +167,8 @@ $ oc wait job realtime-checkup -n --for condition=complete -- $ oc get configmap realtime-checkup-config -n -o yaml ---- + -.Example output config map (success) +Example output config map (success): ++ [source,yaml] ---- apiVersion: v1 diff --git a/modules/virt-setting-cpu-allocation-ratio.adoc b/modules/virt-setting-cpu-allocation-ratio.adoc index 51518b8ff1f7..b83e22db2c88 100644 --- a/modules/virt-setting-cpu-allocation-ratio.adoc +++ b/modules/virt-setting-cpu-allocation-ratio.adoc @@ -18,8 +18,6 @@ To change the default number of vCPUs mapped to each physical CPU, set the `vmiC .Procedure -Set the `vmiCPUAllocationRatio` value in the `HyperConverged` CR to define a node CPU allocation ratio. - . Open the `HyperConverged` CR in your default editor by running the following command: + [source,terminal] diff --git a/modules/virt-specializing-windows-sysprep.adoc b/modules/virt-specializing-windows-sysprep.adoc index 5dcf31d955ae..6b0d80fef881 100644 --- a/modules/virt-specializing-windows-sysprep.adoc +++ b/modules/virt-specializing-windows-sysprep.adoc @@ -24,4 +24,6 @@ Specializing a Windows virtual machine (VM) configures the computer-specific inf . In the *Sysprep* section, click *Edit*, browse to the `unattend.xml` answer file, and click *Save*. . Click *Create VirtualMachine*. +.Result + During the initial boot, Windows uses the `unattend.xml` answer file to specialize the VM. The VM is now ready to use. 
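For reference, the following is a minimal, illustrative sketch of an `unattend.xml` answer file that sets only the computer name during the `specialize` pass. Production answer files are typically generated with Windows System Image Manager and contain additional passes and components; the computer name shown here is a placeholder.

[source,xml]
----
<?xml version="1.0" encoding="utf-8"?>
<unattend xmlns="urn:schemas-microsoft-com:unattend">
  <settings pass="specialize">
    <component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64"
        publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS">
      <ComputerName>example-win-vm</ComputerName>
    </component>
  </settings>
</unattend>
----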
diff --git a/modules/virt-starting-vm-web.adoc index fef8532639b0..887ddf96215e 100644 --- a/modules/virt-starting-vm-web.adoc +++ b/modules/virt-starting-vm-web.adoc @@ -31,7 +31,7 @@ You can start a virtual machine (VM) from the web console. .. Access the *VirtualMachine details* page by clicking the name of the VM. .. Click *Actions* -> *Start*. - ++ [NOTE] ==== When you start a VM that is provisioned from a `URL` source for the first time, the VM has a status of *Importing* while {VirtProductName} imports the container from the URL endpoint. Depending on the size of the image, this process might take several minutes. diff --git a/modules/virt-storage-checkup-web-console.adoc index 737d89fe8b2b..99b146f9dafd 100644 --- a/modules/virt-storage-checkup-web-console.adoc +++ b/modules/virt-storage-checkup-web-console.adoc @@ -18,4 +18,6 @@ Run a storage checkup to validate that storage is working correctly for virtual . Enter a timeout value for the checkup in the *Timeout (minutes)* fields. . Click *Run*. -You can view the status of the storage checkup in the *Checkups* list on the *Storage* tab. Click on the name of the checkup for more details. \ No newline at end of file +.Result + +You can view the status of the storage checkup in the *Checkups* list on the *Storage* tab. Click the name of the checkup for more details. diff --git a/modules/virt-storage-wizard-fields-web.adoc index 370b8cbbc9a3..1c52da488e3d 100644 --- a/modules/virt-storage-wizard-fields-web.adoc +++ b/modules/virt-storage-wizard-fields-web.adoc @@ -48,7 +48,6 @@ endif::openshift-rosa,openshift-dedicated[] |=== [id="virt-storage-wizard-fields-advanced-web_{context}"] -[discrete] == Advanced storage settings The following advanced storage settings are optional and available for *Blank*, *Import via URL*, and *Clone existing PVC* disks. diff --git a/modules/virt-temporary-token-VNC.adoc index f4676fe417bf..9ebb529d5089 100644 --- a/modules/virt-temporary-token-VNC.adoc +++ b/modules/virt-temporary-token-VNC.adoc @@ -50,7 +50,7 @@ Sample output: ---- $ export VNC_TOKEN="" ---- - ++ You can now use the token to access the VNC console of a VM. .Verification diff --git a/modules/virt-troubleshooting-cert-rotation-parameters.adoc index 2539b18f8ca3..ddfe82605139 100644 --- a/modules/virt-troubleshooting-cert-rotation-parameters.adoc +++ b/modules/virt-troubleshooting-cert-rotation-parameters.adoc @@ -17,9 +17,8 @@ Deleting one or more `certConfig` values causes them to revert to the default va If the default values conflict with these conditions, you will receive an error. -If you remove the `server.duration` value in the following example, the default value of `24h0m0s` is greater than the value of `ca.duration`, conflicting with the specified conditions.
+If you remove the `server.duration` value in the following example, the default value of `24h0m0s` is greater than the value of `ca.duration`, conflicting with the specified conditions: -.Example [source,yaml] ---- certConfig: diff --git a/modules/virt-troubleshooting-incorrect-policy-config.adoc index d7f67da6ab19..cb444b424299 100644 --- a/modules/virt-troubleshooting-incorrect-policy-config.adoc +++ b/modules/virt-troubleshooting-incorrect-policy-config.adoc @@ -53,7 +53,8 @@ spec: $ oc apply -f ens01-bridge-testfail.yaml ---- + -.Example output +Example output: ++ [source,terminal] ---- nodenetworkconfigurationpolicy.nmstate.io/ens01-bridge-testfail created @@ -68,7 +69,8 @@ $ oc get nncp + The output shows that the policy failed: + -.Example output +Example output: ++ [source,terminal] ---- NAME STATUS @@ -86,7 +88,8 @@ $ oc get nnce + The output shows that the policy failed on all nodes: + -.Example output +Example output: ++ [source,terminal] ---- NAME STATUS @@ -105,7 +108,8 @@ compute-3.ens01-bridge-testfail FailedToConfigure $ oc get nnce compute-1.ens01-bridge-testfail -o jsonpath='{.status.conditions[?(@.type=="Failing")].message}' ---- + -.Example output +Example output: ++ [source,terminal] ---- [2024-10-10T08:40:46Z INFO nmstatectl] Nmstate version: 2.2.37 @@ -123,7 +127,8 @@ $ oc get nns control-plane-1 -o yaml + The output shows that the interface name on the nodes is `ens1` but the failed policy incorrectly uses `ens01`: + -.Example output +Example output: ++ [source,yaml] ---- - ipv4: @@ -156,7 +161,8 @@ Save the policy to apply the correction. $ oc get nncp ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME STATUS diff --git a/modules/virt-update-node-network-config-form.adoc index fb3f63914b20..77f37422736e 100644 --- a/modules/virt-update-node-network-config-form.adoc +++ b/modules/virt-update-node-network-config-form.adoc @@ -10,7 +10,7 @@ . Edit the fields that you want to update. . Click *Save*. - ++ [NOTE] ==== Addition of a VLAN interface using the form is not supported. To add a VLAN interface, you must use YAML to create the policy. Once added, you cannot edit the policy by using the form. diff --git a/modules/virt-updating-multiple-vms.adoc index 4b231133b25a..9295abad1757 100644 --- a/modules/virt-updating-multiple-vms.adoc +++ b/modules/virt-updating-multiple-vms.adoc @@ -76,7 +76,6 @@ spec: restartPolicy: Never serviceAccountName: kubevirt-api-lifecycle-automation ---- - <1> Replace the image value with your pull URL for the image. <2> Replace the `MACHINE_TYPE_GLOB` value with your own pattern. This pattern is used to detect deprecated machine types that need to be upgraded. <3> If the `RESTART_REQUIRED` environment variable is set to `true`, VMs are restarted after the machine type is updated. If you do not want VMs to be restarted, set the value to `false`. diff --git a/modules/virt-using-virtctl-ssh-command.adoc index efbafb8bdfd6..ca77fae9d638 100644 --- a/modules/virt-using-virtctl-ssh-command.adoc +++ b/modules/virt-using-virtctl-ssh-command.adoc @@ -23,10 +23,9 @@ You can access a running virtual machine (VM) by using the `virtctl ssh` command. ---- $ virtctl -n ssh @example-vm -i <1> ---- -<1> Specify the namespace, user name, and the SSH private key. The default SSH key location is `/home/user/.ssh`.
If you save the key in a different location, you must specify the path. +<1> Specify the namespace, user name, and the SSH private key. The default SSH key location is `/home/user/.ssh`. If you save the key in a different location, you must specify the path. For example: + -.Example [source,terminal] ---- $ virtctl -n my-namespace ssh cloud-user@example-vm -i my-key ----- \ No newline at end of file +---- diff --git a/modules/virt-using-wasp-agent-to-configure-higher-vm-workload-density.adoc b/modules/virt-using-wasp-agent-to-configure-higher-vm-workload-density.adoc index 4774f45166cf..ed8595857b7e 100644 --- a/modules/virt-using-wasp-agent-to-configure-higher-vm-workload-density.adoc +++ b/modules/virt-using-wasp-agent-to-configure-higher-vm-workload-density.adoc @@ -34,7 +34,6 @@ The `wasp-agent` component deploys an Open Container Initiative (OCI) hook to en . Configure the `kubelet` service to permit swap usage: .. Create or edit a `KubeletConfig` file with the parameters shown in the following example: + -.Example of a `KubeletConfig` file [source,yaml] ---- apiVersion: machineconfiguration.openshift.io/v1 @@ -115,7 +114,8 @@ To have enough swap space for the worst-case scenario, make sure to have at leas NODE_SWAP_SPACE = NODE_RAM * (MEMORY_OVER_COMMIT_PERCENT / 100% - 1) ---- + -.Example +For example: ++ [source,terminal] ---- NODE_SWAP_SPACE = 16 GB * (150% / 100% - 1) @@ -299,7 +299,8 @@ $ oc -n openshift-cnv patch HyperConverged/kubevirt-hyperconverged --type='json' ]' ---- + -.Successful output +Successful output: ++ [source,terminal] ---- hyperconverged.hco.kubevirt.io/kubevirt-hyperconverged patched @@ -317,7 +318,8 @@ $ oc rollout status ds wasp-agent -n wasp + If the deployment is successful, the following message is displayed: + -.Example output +Example output: ++ [source, terminal] ---- daemon set "wasp-agent" successfully rolled out @@ -355,7 +357,8 @@ If swap is provisioned, an amount greater than zero is displayed in the `Swap:` $ oc -n openshift-cnv get HyperConverged/kubevirt-hyperconverged -o jsonpath='{.spec.higherWorkloadDensity}{"\n"}' ---- + -.Example output +Example output: ++ [source,terminal] ---- {"memoryOvercommitPercentage":150} diff --git a/modules/virt-verify-status-bootsource-update.adoc b/modules/virt-verify-status-bootsource-update.adoc index f86094776d3c..72182166ab75 100644 --- a/modules/virt-verify-status-bootsource-update.adoc +++ b/modules/virt-verify-status-bootsource-update.adoc @@ -22,8 +22,8 @@ You can determine if a boot source is system-defined or custom by viewing the `H $ oc get hyperconverged kubevirt-hyperconverged -n {CNVNamespace} -o yaml ---- + -.Example output - +Example output: ++ [source,yaml] ---- apiVersion: hco.kubevirt.io/v1beta1 diff --git a/modules/virt-viewing-automatically-created-storage-profiles.adoc b/modules/virt-viewing-automatically-created-storage-profiles.adoc index d1a1dee0de10..597070a384ad 100644 --- a/modules/virt-viewing-automatically-created-storage-profiles.adoc +++ b/modules/virt-viewing-automatically-created-storage-profiles.adoc @@ -27,7 +27,8 @@ $ oc get storageprofile $ oc describe storageprofile ---- + -.Example storage profile details +Example storage profile details: ++ [source,yaml] ---- Name: ocs-storagecluster-ceph-rbd-virtualization diff --git a/modules/virt-viewing-downward-metrics-tool.adoc b/modules/virt-viewing-downward-metrics-tool.adoc index f8890092571f..59eafd5886b4 100644 --- a/modules/virt-viewing-downward-metrics-tool.adoc +++ b/modules/virt-viewing-downward-metrics-tool.adoc @@ -29,7 
+29,8 @@ $ sudo dnf install -y vm-dump-metrics $ sudo vm-dump-metrics ---- + -.Example output +Example output: ++ [source,xml] ---- @@ -46,4 +47,4 @@ $ sudo vm-dump-metrics kubevirt.io ----- \ No newline at end of file +---- diff --git a/modules/virt-viewing-logs-cli.adoc b/modules/virt-viewing-logs-cli.adoc index 8a4e40547097..9db80d4cc300 100644 --- a/modules/virt-viewing-logs-cli.adoc +++ b/modules/virt-viewing-logs-cli.adoc @@ -21,9 +21,9 @@ You can view logs for the {VirtProductName} pods by using the `oc` CLI tool. $ oc get pods -n {CNVNamespace} ---- + -.Example output +Example output: ++ [%collapsible] -==== [source,terminal] ---- NAME READY STATUS RESTARTS AGE @@ -38,7 +38,6 @@ virt-handler-9qs6z 1/1 Running 0 30m virt-operator-7ccfdbf65f-q5snk 1/1 Running 0 32m virt-operator-7ccfdbf65f-vllz8 1/1 Running 0 32m ---- -==== . View the pod log by running the following command: + @@ -54,9 +53,9 @@ If a pod fails to start, you can use the `--previous` option to view logs from t To monitor log output in real time, use the `-f` option. ==== + -.Example output +Example output: ++ [%collapsible] -==== [source,terminal] ---- {"component":"virt-handler","level":"info","msg":"set verbosity to 2","pos":"virt-handler.go:453","timestamp":"2022-04-17T08:58:37.373695Z"} @@ -66,4 +65,3 @@ To monitor log output in real time, use the `-f` option. {"component":"virt-handler","level":"warning","msg":"host model mode is expected to contain only one model","pos":"cpu_plugin.go:103","timestamp":"2022-04-17T08:58:37.390263Z"} {"component":"virt-handler","level":"info","msg":"node-labeller is running","pos":"node_labeller.go:94","timestamp":"2022-04-17T08:58:37.391011Z"} ---- -==== diff --git a/modules/virt-viewing-network-state-of-node.adoc b/modules/virt-viewing-network-state-of-node.adoc index 5252ad2497db..32adbce4d639 100644 --- a/modules/virt-viewing-network-state-of-node.adoc +++ b/modules/virt-viewing-network-state-of-node.adoc @@ -28,7 +28,8 @@ $ oc get nns $ oc get nns node01 -o yaml ---- + -.Example output +Example output: ++ [source,yaml] ---- apiVersion: nmstate.io/v1 diff --git a/modules/virt-viewing-vmi-ip-cli.adoc b/modules/virt-viewing-vmi-ip-cli.adoc index 05045cb930de..8bd422e75968 100644 --- a/modules/virt-viewing-vmi-ip-cli.adoc +++ b/modules/virt-viewing-vmi-ip-cli.adoc @@ -26,7 +26,8 @@ You must install the QEMU guest agent on a VM to view the IP address of a second $ oc describe vmi ---- + -.Example output +Example output: ++ [source,yaml] ---- # ... diff --git a/modules/virt-vm-behavior-dr.adoc b/modules/virt-vm-behavior-dr.adoc index 4356fc8779f9..55240208ff4c 100644 --- a/modules/virt-vm-behavior-dr.adoc +++ b/modules/virt-vm-behavior-dr.adoc @@ -8,18 +8,14 @@ VMs typically act similarly to pod-based workloads during both relocate and failover disaster recovery flows. -[discrete] -[id="dr-relocate_{context}"] -== Relocate - +Relocate:: ++ Use relocate to move an application from the primary environment to the secondary environment when the primary environment is still accessible. During relocate, the VM is gracefully terminated, any unreplicated data is synchronized to the secondary environment, and the VM starts in the secondary environment. - ++ Because the VM terminates gracefully, there is no data loss. Therefore, the VM operating system will not perform crash recovery. 
-[discrete] -[id="dr-failover_{context}"] -== Failover - +Failover:: ++ Use failover when there is a critical failure in the primary environment that makes it impractical or impossible to use relocation to move the workload to a secondary environment. When failover is executed, the storage is fenced from the primary environment, the I/O to the VM disks is abruptly halted, and the VM restarts in the secondary environment using the replicated data. - -You should expect data loss due to failover. The extent of loss depends on whether you use Metro-DR, which uses synchronous replication, or Regional-DR, which uses asynchronous replication. Because Regional-DR uses snapshot-based replication intervals, the window of data loss is proportional to the replication interval length. When the VM restarts, the operating system might perform crash recovery. \ No newline at end of file ++ +You should expect data loss due to failover. The extent of loss depends on whether you use Metro-DR, which uses synchronous replication, or Regional-DR, which uses asynchronous replication. Because Regional-DR uses snapshot-based replication intervals, the window of data loss is proportional to the replication interval length. When the VM restarts, the operating system might perform crash recovery. diff --git a/modules/virt-vm-custom-scheduler.adoc b/modules/virt-vm-custom-scheduler.adoc index efafaee1392c..adddb609ba43 100644 --- a/modules/virt-vm-custom-scheduler.adoc +++ b/modules/virt-vm-custom-scheduler.adoc @@ -49,7 +49,8 @@ spec: $ oc get pods ---- + -.Example output +Example output: ++ [source,terminal] ---- NAME READY STATUS RESTARTS AGE @@ -65,7 +66,8 @@ $ oc describe pod virt-launcher-vm-fedora-dpc87 + The value of the `From` field in the output verifies that the scheduler name matches the custom scheduler specified in the `VirtualMachine` manifest: + -.Example output +Example output: ++ [source,terminal] ---- [...] diff --git a/modules/virt-vmware-comparison.adoc b/modules/virt-vmware-comparison.adoc index c06b5aed5fd9..9724b6a2166a 100644 --- a/modules/virt-vmware-comparison.adoc +++ b/modules/virt-vmware-comparison.adoc @@ -15,18 +15,22 @@ If you are familiar with {vmw-first}, the following table lists {VirtProductName |{vmw-short} concept |{VirtProductName} |Explanation |Datastore -|Persistent volume (PV){nbsp}+ + +a|Persistent volume (PV) + Persistent volume claim (PVC) |Stores VM disks. A PV represents existing storage and is attached to a VM through a PVC. When created with the `ReadWriteMany` (RWX) access mode, PVCs can be mounted by multiple VMs simultaneously. |Dynamic Resource Scheduling (DRS) -|Pod eviction policy{nbsp}+ + +a|Pod eviction policy + Descheduler |Provides active resource balancing. A combination of pod eviction policies and a descheduler allows VMs to be live migrated to more appropriate nodes to keep node resource utilization manageable. |NSX -|Multus{nbsp}+ + -OVN-Kubernetes{nbsp}+ + +a|Multus + +OVN-Kubernetes + Third-party container network interface (CNI) plug-ins |Provides an overlay network configuration. There is no direct equivalent for NSX in {VirtProductName}, but you can use the OVN-Kubernetes network provider or install certified third-party CNI plug-ins. @@ -34,7 +38,8 @@ Third-party container network interface (CNI) plug-ins |Storage class |Provides policy-based storage selection. Storage classes represent various storage types and describe storage capabilities, such as quality of service, backup policy, reclaim policy, and whether volume expansion is allowed. 
A PVC can request a specific storage class to satisfy application requirements. -|vCenter + +a|vCenter + vRealize Operations |OpenShift Metrics and Monitoring |Provides host and VM metrics. You can view metrics and monitor the overall health of the cluster and VMs by using the {product-title} web console. @@ -43,9 +48,12 @@ vRealize Operations |Live migration |Moves a running VM to another node without interruption. For live migration to be available, the PVC attached to the VM must have the `ReadWriteMany` (RWX) access mode. -|vSwitch + +a|vSwitch + DvSwitch -|NMState Operator{nbsp}+ + + +a|NMState Operator + Multus |Provides a physical network configuration. You can use the NMState Operator to apply state-driven network configuration and manage various network interface types, including Linux bridges and network bonds. With Multus, you can attach multiple network interfaces and connect VMs to external networks. |=== diff --git a/modules/virt-wasp-agent-pod-eviction.adoc b/modules/virt-wasp-agent-pod-eviction.adoc index 8fac4c70275c..8e35beaeb70a 100644 --- a/modules/virt-wasp-agent-pod-eviction.adoc +++ b/modules/virt-wasp-agent-pod-eviction.adoc @@ -10,9 +10,8 @@ The wasp agent manages pod eviction when the system is heavily loaded and nodes High swap I/O traffic:: -This condition is met when swap-related I/O traffic is excessively high. +This condition is met when swap-related I/O traffic is excessively high: + -.Condition [source,text] ---- averageSwapInPerSecond > maxAverageSwapInPagesPerSecond @@ -24,9 +23,8 @@ By default, `maxAverageSwapInPagesPerSecond` and `maxAverageSwapOutPagesPerSecon High swap utilization:: -This condition is met when swap utilization is excessively high, causing the current virtual memory usage to exceed the factored threshold. The `NODE_SWAP_SPACE` setting in your `MachineConfig` object can impact this condition. +This condition is met when swap utilization is excessively high, causing the current virtual memory usage to exceed the factored threshold. The `NODE_SWAP_SPACE` setting in your `MachineConfig` object can impact this condition: + -.Condition [source,text] ---- nodeWorkingSet + nodeSwapUsage < totalNodeMemory + totalSwapMemory × thresholdFactor @@ -48,4 +46,4 @@ You can use the following environment variables to adjust the values used to cal |Sets the `thresholdFactor` value used to calculate high swap utilization. |`AVERAGE_WINDOW_SIZE_SECONDS` |Sets the time interval for calculating the average swap usage. -|=== \ No newline at end of file +|===
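As an illustration of tuning these values, if your deployment injects the variables through the `wasp-agent` daemon set (an assumption; verify how your environment sets them), you could adjust the averaging window with `oc set env`. The value `60` is only an example:

[source,terminal]
----
$ oc set env ds/wasp-agent -n wasp AVERAGE_WINDOW_SIZE_SECONDS=60
----

After changing the value, you can watch the daemon set roll out with `oc rollout status ds wasp-agent -n wasp`.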