diff --git a/.github/workflows/generate-and-build-sdks.yml b/.github/workflows/generate-and-build-sdks.yml
index 78ec2579a7f..87da4b1d8f5 100644
--- a/.github/workflows/generate-and-build-sdks.yml
+++ b/.github/workflows/generate-and-build-sdks.yml
@@ -47,6 +47,14 @@ jobs:
name: SDK_Source_PowerShell
path: _build/install/default/xapi/sdk/powershell/*
+ - name: Store Go SDK Artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: SDK_Artifacts_Go
+ path: |
+ _build/install/default/xapi/sdk/go/*
+ !_build/install/default/xapi/sdk/go/dune
+
- name: Trim dune cache
run: opam exec -- dune cache trim --size=2GiB
diff --git a/.github/workflows/other.yml b/.github/workflows/other.yml
index fc4be895fd1..959679a92b3 100644
--- a/.github/workflows/other.yml
+++ b/.github/workflows/other.yml
@@ -45,26 +45,26 @@ jobs:
env:
SKIP: no-commit-to-branch
- - name: Run Pytest for python 2 and get code coverage for Codecov
+ - name: Run Pytest for python 2 and get code coverage
if: ${{ matrix.python-version == '2.7' }}
run: >
pip install enum future mock pytest-coverage pytest-mock &&
pytest
- --cov=scripts scripts --cov-fail-under 45 -vv -rA
+ --cov=scripts --cov=ocaml/xcp-rrdd
+ scripts/ ocaml/xcp-rrdd -vv -rA
+ --junitxml=.git/pytest${{matrix.python-version}}.xml
--cov-report term-missing
--cov-report xml:.git/coverage${{matrix.python-version}}.xml
+ env:
+ PYTHONDEVMODE: yes
- - name: Upload Python ${{matrix.python-version}} coverage report to Codecov
- if: ${{ matrix.python-version != '2.7' }}
- uses: codecov/codecov-action@v3
+ - name: Upload coverage report to Coveralls
+ uses: coverallsapp/github-action@v2
with:
- directory: .git
- files: coverage${{matrix.python-version}}.xml
- env_vars: OS,PYTHON
- fail_ci_if_error: false
- flags: python${{matrix.python-version}}
- name: coverage${{matrix.python-version}}
- verbose: true
+ format: cobertura
+ files: .git/coverage${{matrix.python-version}}.xml
+ flag-name: python${{matrix.python-version}}
+ parallel: true
- uses: dciborow/action-pylint@0.1.0
if: ${{ matrix.python-version != '2.7' }}
@@ -89,6 +89,20 @@ jobs:
github_token: ${{ secrets.github_token }}
continue-on-error: true
+ # For coverage of 2.7 and 3.11 we upload to Coveralls in parallel mode.
+ # To view the Coveralls results of the PR, click on the "Details" link to the right
+ # of the Coveralls Logo in the Checks section of the PR.
+ finish-parallel-coveralls-upload:
+ name: Finish coverage upload
+    needs: python-test # run after the python-test job has finished uploading coverage
+ runs-on: ubuntu-latest
+ steps:
+ - name: Finish the parallel coverage upload to Coveralls
+ uses: coverallsapp/github-action@v2
+ with:
+ parallel-finished: true
+ continue-on-error: true # Do not fail CI if this step fails
+
deprecation-test:
name: Deprecation tests
runs-on: ubuntu-22.04
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1c3dca70fcd..9a051ef15f9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -46,6 +46,12 @@ jobs:
runs-on: ubuntu-latest
needs: [build-python, build-sdks]
steps:
+ - name: Retrieve Go SDK distribution artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: SDK_Artifacts_Go
+ path: sdk_go/
+
- name: Retrieve Python SDK distribution artifacts
uses: actions/download-artifact@v4
with:
@@ -93,12 +99,17 @@ jobs:
shell: bash
run: zip PowerShell-SDK-7.x-prerelease-unsigned.zip ./sdk_powershell_7x -r
+ - name: Zip Go SDK artifacts for deployment
+ shell: bash
+ run: zip Go-SDK-prerelease-unsigned.zip ./sdk_go -r
+
- name: Create release ${{ github.ref_name }}
shell: bash
run: |
gh release create ${{ github.ref_name }} --repo ${{ github.repository }} --generate-notes dist/* \
PowerShell-SDK-5.x-prerelease-unsigned.zip \
PowerShell-SDK-7.x-prerelease-unsigned.zip \
+ Go-SDK-prerelease-unsigned.zip \
libxenserver-prerelease.tar.gz libxenserver-prerelease.src.tar.gz
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/Makefile b/Makefile
index 256a3bfcd05..c01c29eea47 100644
--- a/Makefile
+++ b/Makefile
@@ -256,7 +256,7 @@ install: build doc sdk doc-json
message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \
message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli \
xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \
- xapi-rrd xapi-inventory \
+ xapi-rrd xapi-inventory clock \
xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck
# docs
mkdir -p $(DESTDIR)$(DOCDIR)
@@ -278,7 +278,7 @@ uninstall:
message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \
message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-log \
xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \
- xapi-rrd xapi-inventory \
+ xapi-rrd xapi-inventory clock \
xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck
compile_flags.txt: Makefile
diff --git a/clock.opam b/clock.opam
new file mode 100644
index 00000000000..44c24235c58
--- /dev/null
+++ b/clock.opam
@@ -0,0 +1,32 @@
+# This file is generated by dune, edit dune-project instead
+opam-version: "2.0"
+synopsis: "Xapi's library for managing time"
+maintainer: ["Xapi project maintainers"]
+authors: ["Jonathan Ludlam" "Pau Ruiz Safont"]
+license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
+homepage: "https://xapi-project.github.io/"
+bug-reports: "https://github.com/xapi-project/xen-api/issues"
+depends: [
+ "dune" {>= "3.0"}
+ "ocaml" {>= "4.12"}
+ "alcotest" {with-test}
+ "astring"
+ "mtime"
+ "ptime"
+ "odoc" {with-doc}
+]
+build: [
+ ["dune" "subst"] {dev}
+ [
+ "dune"
+ "build"
+ "-p"
+ name
+ "-j"
+ jobs
+ "@install"
+ "@runtest" {with-test}
+ "@doc" {with-doc}
+ ]
+]
+dev-repo: "git+https://github.com/xapi-project/xen-api.git"
diff --git a/doc/content/xen-api/classes/_index.html b/doc/content/xen-api/classes/_index.html
index d466e62368c..01a5748de6f 100644
--- a/doc/content/xen-api/classes/_index.html
+++ b/doc/content/xen-api/classes/_index.html
@@ -2,6 +2,7 @@
title = "XenAPI Reference"
layout = "class"
type = "xenapi"
+weight = 100
+++
diff --git a/doc/content/xen-api/releases/1.250.0.md b/doc/content/xen-api/releases/1.250.0.md
new file mode 100644
index 00000000000..82dd803d464
--- /dev/null
+++ b/doc/content/xen-api/releases/1.250.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.250.0"
+layout = "release"
+type = "xenapi"
+release = "1.250.0"
+weight = 38
++++
diff --git a/doc/content/xen-api/releases/1.257.0.md b/doc/content/xen-api/releases/1.257.0.md
new file mode 100644
index 00000000000..dbc387522b1
--- /dev/null
+++ b/doc/content/xen-api/releases/1.257.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.257.0"
+layout = "release"
+type = "xenapi"
+release = "1.257.0"
+weight = 37
++++
diff --git a/doc/content/xen-api/releases/1.271.0.md b/doc/content/xen-api/releases/1.271.0.md
new file mode 100644
index 00000000000..a315596be00
--- /dev/null
+++ b/doc/content/xen-api/releases/1.271.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.271.0"
+layout = "release"
+type = "xenapi"
+release = "1.271.0"
+weight = 36
++++
diff --git a/doc/content/xen-api/releases/1.290.0.md b/doc/content/xen-api/releases/1.290.0.md
new file mode 100644
index 00000000000..b013eba0625
--- /dev/null
+++ b/doc/content/xen-api/releases/1.290.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.290.0"
+layout = "release"
+type = "xenapi"
+release = "1.290.0"
+weight = 35
++++
diff --git a/doc/content/xen-api/releases/1.294.0.md b/doc/content/xen-api/releases/1.294.0.md
new file mode 100644
index 00000000000..28b7eb16705
--- /dev/null
+++ b/doc/content/xen-api/releases/1.294.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.294.0"
+layout = "release"
+type = "xenapi"
+release = "1.294.0"
+weight = 34
++++
diff --git a/doc/content/xen-api/releases/1.297.0.md b/doc/content/xen-api/releases/1.297.0.md
new file mode 100644
index 00000000000..5722a2e10ef
--- /dev/null
+++ b/doc/content/xen-api/releases/1.297.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.297.0"
+layout = "release"
+type = "xenapi"
+release = "1.297.0"
+weight = 33
++++
diff --git a/doc/content/xen-api/releases/1.298.0.md b/doc/content/xen-api/releases/1.298.0.md
new file mode 100644
index 00000000000..230a6832557
--- /dev/null
+++ b/doc/content/xen-api/releases/1.298.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.298.0"
+layout = "release"
+type = "xenapi"
+release = "1.298.0"
+weight = 32
++++
diff --git a/doc/content/xen-api/releases/1.301.0.md b/doc/content/xen-api/releases/1.301.0.md
new file mode 100644
index 00000000000..1b6f714bc05
--- /dev/null
+++ b/doc/content/xen-api/releases/1.301.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.301.0"
+layout = "release"
+type = "xenapi"
+release = "1.301.0"
+weight = 31
++++
diff --git a/doc/content/xen-api/releases/1.303.0.md b/doc/content/xen-api/releases/1.303.0.md
new file mode 100644
index 00000000000..62d65595ddb
--- /dev/null
+++ b/doc/content/xen-api/releases/1.303.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.303.0"
+layout = "release"
+type = "xenapi"
+release = "1.303.0"
+weight = 30
++++
diff --git a/doc/content/xen-api/releases/1.304.0.md b/doc/content/xen-api/releases/1.304.0.md
new file mode 100644
index 00000000000..19bc602d1af
--- /dev/null
+++ b/doc/content/xen-api/releases/1.304.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.304.0"
+layout = "release"
+type = "xenapi"
+release = "1.304.0"
+weight = 29
++++
diff --git a/doc/content/xen-api/releases/1.307.0.md b/doc/content/xen-api/releases/1.307.0.md
new file mode 100644
index 00000000000..ae89b065da1
--- /dev/null
+++ b/doc/content/xen-api/releases/1.307.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.307.0"
+layout = "release"
+type = "xenapi"
+release = "1.307.0"
+weight = 28
++++
diff --git a/doc/content/xen-api/releases/1.313.0.md b/doc/content/xen-api/releases/1.313.0.md
new file mode 100644
index 00000000000..3a0cd73b6bc
--- /dev/null
+++ b/doc/content/xen-api/releases/1.313.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.313.0"
+layout = "release"
+type = "xenapi"
+release = "1.313.0"
+weight = 27
++++
diff --git a/doc/content/xen-api/releases/1.318.0.md b/doc/content/xen-api/releases/1.318.0.md
new file mode 100644
index 00000000000..4661e86a0f8
--- /dev/null
+++ b/doc/content/xen-api/releases/1.318.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.318.0"
+layout = "release"
+type = "xenapi"
+release = "1.318.0"
+weight = 26
++++
diff --git a/doc/content/xen-api/releases/1.329.0.md b/doc/content/xen-api/releases/1.329.0.md
new file mode 100644
index 00000000000..501551c8692
--- /dev/null
+++ b/doc/content/xen-api/releases/1.329.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 1.329.0"
+layout = "release"
+type = "xenapi"
+release = "1.329.0"
+weight = 25
++++
diff --git a/doc/content/xen-api/releases/21.2.0.md b/doc/content/xen-api/releases/21.2.0.md
new file mode 100644
index 00000000000..7346972ca00
--- /dev/null
+++ b/doc/content/xen-api/releases/21.2.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 21.2.0"
+layout = "release"
+type = "xenapi"
+release = "21.2.0"
+weight = 24
++++
diff --git a/doc/content/xen-api/releases/21.3.0.md b/doc/content/xen-api/releases/21.3.0.md
new file mode 100644
index 00000000000..041df4bea38
--- /dev/null
+++ b/doc/content/xen-api/releases/21.3.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 21.3.0"
+layout = "release"
+type = "xenapi"
+release = "21.3.0"
+weight = 23
++++
diff --git a/doc/content/xen-api/releases/21.4.0.md b/doc/content/xen-api/releases/21.4.0.md
new file mode 100644
index 00000000000..f16971c9283
--- /dev/null
+++ b/doc/content/xen-api/releases/21.4.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 21.4.0"
+layout = "release"
+type = "xenapi"
+release = "21.4.0"
+weight = 22
++++
diff --git a/doc/content/xen-api/releases/22.12.0.md b/doc/content/xen-api/releases/22.12.0.md
new file mode 100644
index 00000000000..be20881b901
--- /dev/null
+++ b/doc/content/xen-api/releases/22.12.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.12.0"
+layout = "release"
+type = "xenapi"
+release = "22.12.0"
+weight = 20
++++
diff --git a/doc/content/xen-api/releases/22.16.0.md b/doc/content/xen-api/releases/22.16.0.md
new file mode 100644
index 00000000000..8d0c5c0f0d5
--- /dev/null
+++ b/doc/content/xen-api/releases/22.16.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.16.0"
+layout = "release"
+type = "xenapi"
+release = "22.16.0"
+weight = 19
++++
diff --git a/doc/content/xen-api/releases/22.19.0.md b/doc/content/xen-api/releases/22.19.0.md
new file mode 100644
index 00000000000..024ab29aced
--- /dev/null
+++ b/doc/content/xen-api/releases/22.19.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.19.0"
+layout = "release"
+type = "xenapi"
+release = "22.19.0"
+weight = 18
++++
diff --git a/doc/content/xen-api/releases/22.20.0.md b/doc/content/xen-api/releases/22.20.0.md
new file mode 100644
index 00000000000..3e7c63b0e0b
--- /dev/null
+++ b/doc/content/xen-api/releases/22.20.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.20.0"
+layout = "release"
+type = "xenapi"
+release = "22.20.0"
+weight = 17
++++
diff --git a/doc/content/xen-api/releases/22.26.0.md b/doc/content/xen-api/releases/22.26.0.md
new file mode 100644
index 00000000000..cbc29030ef6
--- /dev/null
+++ b/doc/content/xen-api/releases/22.26.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.26.0"
+layout = "release"
+type = "xenapi"
+release = "22.26.0"
+weight = 16
++++
diff --git a/doc/content/xen-api/releases/22.27.0.md b/doc/content/xen-api/releases/22.27.0.md
new file mode 100644
index 00000000000..dd9ef24ace7
--- /dev/null
+++ b/doc/content/xen-api/releases/22.27.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.27.0"
+layout = "release"
+type = "xenapi"
+release = "22.27.0"
+weight = 15
++++
diff --git a/doc/content/xen-api/releases/22.33.0.md b/doc/content/xen-api/releases/22.33.0.md
new file mode 100644
index 00000000000..34c8533210a
--- /dev/null
+++ b/doc/content/xen-api/releases/22.33.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.33.0"
+layout = "release"
+type = "xenapi"
+release = "22.33.0"
+weight = 14
++++
diff --git a/doc/content/xen-api/releases/22.37.0.md b/doc/content/xen-api/releases/22.37.0.md
new file mode 100644
index 00000000000..9cebac8ab21
--- /dev/null
+++ b/doc/content/xen-api/releases/22.37.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.37.0"
+layout = "release"
+type = "xenapi"
+release = "22.37.0"
+weight = 13
++++
diff --git a/doc/content/xen-api/releases/22.5.0.md b/doc/content/xen-api/releases/22.5.0.md
new file mode 100644
index 00000000000..1103d45c800
--- /dev/null
+++ b/doc/content/xen-api/releases/22.5.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 22.5.0"
+layout = "release"
+type = "xenapi"
+release = "22.5.0"
+weight = 21
++++
diff --git a/doc/content/xen-api/releases/23.1.0.md b/doc/content/xen-api/releases/23.1.0.md
new file mode 100644
index 00000000000..8f100cb1cd5
--- /dev/null
+++ b/doc/content/xen-api/releases/23.1.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.1.0"
+layout = "release"
+type = "xenapi"
+release = "23.1.0"
+weight = 12
++++
diff --git a/doc/content/xen-api/releases/23.14.0.md b/doc/content/xen-api/releases/23.14.0.md
new file mode 100644
index 00000000000..c812f55d41f
--- /dev/null
+++ b/doc/content/xen-api/releases/23.14.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.14.0"
+layout = "release"
+type = "xenapi"
+release = "23.14.0"
+weight = 10
++++
diff --git a/doc/content/xen-api/releases/23.18.0.md b/doc/content/xen-api/releases/23.18.0.md
new file mode 100644
index 00000000000..0aa6ec85d11
--- /dev/null
+++ b/doc/content/xen-api/releases/23.18.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.18.0"
+layout = "release"
+type = "xenapi"
+release = "23.18.0"
+weight = 9
++++
diff --git a/doc/content/xen-api/releases/23.25.0.md b/doc/content/xen-api/releases/23.25.0.md
new file mode 100644
index 00000000000..4b5eeb7ff5d
--- /dev/null
+++ b/doc/content/xen-api/releases/23.25.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.25.0"
+layout = "release"
+type = "xenapi"
+release = "23.25.0"
+weight = 8
++++
diff --git a/doc/content/xen-api/releases/23.27.0.md b/doc/content/xen-api/releases/23.27.0.md
new file mode 100644
index 00000000000..196d2899e52
--- /dev/null
+++ b/doc/content/xen-api/releases/23.27.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.27.0"
+layout = "release"
+type = "xenapi"
+release = "23.27.0"
+weight = 7
++++
diff --git a/doc/content/xen-api/releases/23.30.0.md b/doc/content/xen-api/releases/23.30.0.md
new file mode 100644
index 00000000000..350058f958f
--- /dev/null
+++ b/doc/content/xen-api/releases/23.30.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.30.0"
+layout = "release"
+type = "xenapi"
+release = "23.30.0"
+weight = 6
++++
diff --git a/doc/content/xen-api/releases/23.9.0.md b/doc/content/xen-api/releases/23.9.0.md
new file mode 100644
index 00000000000..db1bb6bdd49
--- /dev/null
+++ b/doc/content/xen-api/releases/23.9.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 23.9.0"
+layout = "release"
+type = "xenapi"
+release = "23.9.0"
+weight = 11
++++
diff --git a/doc/content/xen-api/releases/24.0.0.md b/doc/content/xen-api/releases/24.0.0.md
new file mode 100644
index 00000000000..8f754b4f3ff
--- /dev/null
+++ b/doc/content/xen-api/releases/24.0.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 24.0.0"
+layout = "release"
+type = "xenapi"
+release = "24.0.0"
+weight = 5
++++
diff --git a/doc/content/xen-api/releases/24.10.0.md b/doc/content/xen-api/releases/24.10.0.md
new file mode 100644
index 00000000000..0f3eb490311
--- /dev/null
+++ b/doc/content/xen-api/releases/24.10.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 24.10.0"
+layout = "release"
+type = "xenapi"
+release = "24.10.0"
+weight = 3
++++
diff --git a/doc/content/xen-api/releases/24.14.0.md b/doc/content/xen-api/releases/24.14.0.md
new file mode 100644
index 00000000000..858f8119365
--- /dev/null
+++ b/doc/content/xen-api/releases/24.14.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 24.14.0"
+layout = "release"
+type = "xenapi"
+release = "24.14.0"
+weight = 2
++++
diff --git a/doc/content/xen-api/releases/24.16.0.md b/doc/content/xen-api/releases/24.16.0.md
new file mode 100644
index 00000000000..0852870afb2
--- /dev/null
+++ b/doc/content/xen-api/releases/24.16.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 24.16.0"
+layout = "release"
+type = "xenapi"
+release = "24.16.0"
+weight = 1
++++
diff --git a/doc/content/xen-api/releases/24.3.0.md b/doc/content/xen-api/releases/24.3.0.md
new file mode 100644
index 00000000000..8b94dae5839
--- /dev/null
+++ b/doc/content/xen-api/releases/24.3.0.md
@@ -0,0 +1,7 @@
++++
+title = "XAPI 24.3.0"
+layout = "release"
+type = "xenapi"
+release = "24.3.0"
+weight = 4
++++
diff --git a/doc/content/xen-api/releases/_index.md b/doc/content/xen-api/releases/_index.md
new file mode 100644
index 00000000000..ca69787c3b5
--- /dev/null
+++ b/doc/content/xen-api/releases/_index.md
@@ -0,0 +1,6 @@
++++
+title = "XenAPI Releases"
+weight = 150
++++
+
+{{% children %}}
\ No newline at end of file
diff --git a/doc/content/xen-api/releases/boston.md b/doc/content/xen-api/releases/boston.md
new file mode 100644
index 00000000000..a9c7f3ec670
--- /dev/null
+++ b/doc/content/xen-api/releases/boston.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.0"
+layout = "release"
+type = "xenapi"
+release = "boston"
+weight = 60
++++
diff --git a/doc/content/xen-api/releases/clearwater-felton.md b/doc/content/xen-api/releases/clearwater-felton.md
new file mode 100644
index 00000000000..5b96cf5bc23
--- /dev/null
+++ b/doc/content/xen-api/releases/clearwater-felton.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.2 SP1 Hotfix 4"
+layout = "release"
+type = "xenapi"
+release = "clearwater-felton"
+weight = 55
++++
diff --git a/doc/content/xen-api/releases/clearwater-whetstone.md b/doc/content/xen-api/releases/clearwater-whetstone.md
new file mode 100644
index 00000000000..7c08accf364
--- /dev/null
+++ b/doc/content/xen-api/releases/clearwater-whetstone.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.2 SP1 Hotfix 11"
+layout = "release"
+type = "xenapi"
+release = "clearwater-whetstone"
+weight = 54
++++
diff --git a/doc/content/xen-api/releases/clearwater.md b/doc/content/xen-api/releases/clearwater.md
new file mode 100644
index 00000000000..8c27b5c340e
--- /dev/null
+++ b/doc/content/xen-api/releases/clearwater.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.2"
+layout = "release"
+type = "xenapi"
+release = "clearwater"
+weight = 58
++++
diff --git a/doc/content/xen-api/releases/cowley.md b/doc/content/xen-api/releases/cowley.md
new file mode 100644
index 00000000000..29f45a0eddc
--- /dev/null
+++ b/doc/content/xen-api/releases/cowley.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 5.6 FP1"
+layout = "release"
+type = "xenapi"
+release = "cowley"
+weight = 61
++++
diff --git a/doc/content/xen-api/releases/cream.md b/doc/content/xen-api/releases/cream.md
new file mode 100644
index 00000000000..7f47c267f76
--- /dev/null
+++ b/doc/content/xen-api/releases/cream.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.5 SP1"
+layout = "release"
+type = "xenapi"
+release = "cream"
+weight = 52
++++
diff --git a/doc/content/xen-api/releases/creedence.md b/doc/content/xen-api/releases/creedence.md
new file mode 100644
index 00000000000..f0402dd697d
--- /dev/null
+++ b/doc/content/xen-api/releases/creedence.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.5"
+layout = "release"
+type = "xenapi"
+release = "creedence"
+weight = 53
++++
diff --git a/doc/content/xen-api/releases/dundee.md b/doc/content/xen-api/releases/dundee.md
new file mode 100644
index 00000000000..ce1fd600279
--- /dev/null
+++ b/doc/content/xen-api/releases/dundee.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.0"
+layout = "release"
+type = "xenapi"
+release = "dundee"
+weight = 50
++++
diff --git a/doc/content/xen-api/releases/ely.md b/doc/content/xen-api/releases/ely.md
new file mode 100644
index 00000000000..1371431e18f
--- /dev/null
+++ b/doc/content/xen-api/releases/ely.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.1"
+layout = "release"
+type = "xenapi"
+release = "ely"
+weight = 49
++++
diff --git a/doc/content/xen-api/releases/falcon.md b/doc/content/xen-api/releases/falcon.md
new file mode 100644
index 00000000000..15ecde78810
--- /dev/null
+++ b/doc/content/xen-api/releases/falcon.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.2"
+layout = "release"
+type = "xenapi"
+release = "falcon"
+weight = 48
++++
diff --git a/doc/content/xen-api/releases/george.md b/doc/content/xen-api/releases/george.md
new file mode 100644
index 00000000000..ef79369c327
--- /dev/null
+++ b/doc/content/xen-api/releases/george.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 5.5"
+layout = "release"
+type = "xenapi"
+release = "george"
+weight = 63
++++
diff --git a/doc/content/xen-api/releases/indigo.md b/doc/content/xen-api/releases/indigo.md
new file mode 100644
index 00000000000..cdb8b9cecf6
--- /dev/null
+++ b/doc/content/xen-api/releases/indigo.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.5 SP1 Hotfix 31"
+layout = "release"
+type = "xenapi"
+release = "indigo"
+weight = 51
++++
diff --git a/doc/content/xen-api/releases/inverness.md b/doc/content/xen-api/releases/inverness.md
new file mode 100644
index 00000000000..85560d9b161
--- /dev/null
+++ b/doc/content/xen-api/releases/inverness.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.3"
+layout = "release"
+type = "xenapi"
+release = "inverness"
+weight = 47
++++
diff --git a/doc/content/xen-api/releases/jura.md b/doc/content/xen-api/releases/jura.md
new file mode 100644
index 00000000000..751b36460de
--- /dev/null
+++ b/doc/content/xen-api/releases/jura.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.4"
+layout = "release"
+type = "xenapi"
+release = "jura"
+weight = 46
++++
diff --git a/doc/content/xen-api/releases/kolkata.md b/doc/content/xen-api/releases/kolkata.md
new file mode 100644
index 00000000000..624757ac680
--- /dev/null
+++ b/doc/content/xen-api/releases/kolkata.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.5"
+layout = "release"
+type = "xenapi"
+release = "kolkata"
+weight = 45
++++
diff --git a/doc/content/xen-api/releases/lima.md b/doc/content/xen-api/releases/lima.md
new file mode 100644
index 00000000000..f95d0af2877
--- /dev/null
+++ b/doc/content/xen-api/releases/lima.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 7.6"
+layout = "release"
+type = "xenapi"
+release = "lima"
+weight = 44
++++
diff --git a/doc/content/xen-api/releases/miami.md b/doc/content/xen-api/releases/miami.md
new file mode 100644
index 00000000000..5ca0dc1e236
--- /dev/null
+++ b/doc/content/xen-api/releases/miami.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 4.1"
+layout = "release"
+type = "xenapi"
+release = "miami"
+weight = 67
++++
diff --git a/doc/content/xen-api/releases/midnight-ride.md b/doc/content/xen-api/releases/midnight-ride.md
new file mode 100644
index 00000000000..9a8dca612e5
--- /dev/null
+++ b/doc/content/xen-api/releases/midnight-ride.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 5.6"
+layout = "release"
+type = "xenapi"
+release = "midnight-ride"
+weight = 62
++++
diff --git a/doc/content/xen-api/releases/naples.md b/doc/content/xen-api/releases/naples.md
new file mode 100644
index 00000000000..6c5bdfd30d3
--- /dev/null
+++ b/doc/content/xen-api/releases/naples.md
@@ -0,0 +1,7 @@
++++
+title = "Citrix Hypervisor 8.0"
+layout = "release"
+type = "xenapi"
+release = "naples"
+weight = 43
++++
diff --git a/doc/content/xen-api/releases/nile-preview.md b/doc/content/xen-api/releases/nile-preview.md
new file mode 100644
index 00000000000..9b7eee9e675
--- /dev/null
+++ b/doc/content/xen-api/releases/nile-preview.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 8 Preview"
+layout = "release"
+type = "xenapi"
+release = "nile-preview"
+weight = 39
++++
diff --git a/doc/content/xen-api/releases/orlando-update-1.md b/doc/content/xen-api/releases/orlando-update-1.md
new file mode 100644
index 00000000000..2bcd4892b6d
--- /dev/null
+++ b/doc/content/xen-api/releases/orlando-update-1.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 5.0 Update 1"
+layout = "release"
+type = "xenapi"
+release = "orlando-update-1"
+weight = 64
++++
diff --git a/doc/content/xen-api/releases/orlando.md b/doc/content/xen-api/releases/orlando.md
new file mode 100644
index 00000000000..6d348d7d1ce
--- /dev/null
+++ b/doc/content/xen-api/releases/orlando.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 5.0"
+layout = "release"
+type = "xenapi"
+release = "orlando"
+weight = 65
++++
diff --git a/doc/content/xen-api/releases/quebec.md b/doc/content/xen-api/releases/quebec.md
new file mode 100644
index 00000000000..72e56f85156
--- /dev/null
+++ b/doc/content/xen-api/releases/quebec.md
@@ -0,0 +1,7 @@
++++
+title = "Citrix Hypervisor 8.1"
+layout = "release"
+type = "xenapi"
+release = "quebec"
+weight = 42
++++
diff --git a/doc/content/xen-api/releases/rio.md b/doc/content/xen-api/releases/rio.md
new file mode 100644
index 00000000000..2ab3a3c295e
--- /dev/null
+++ b/doc/content/xen-api/releases/rio.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 4.0"
+layout = "release"
+type = "xenapi"
+release = "rio"
+weight = 68
++++
diff --git a/doc/content/xen-api/releases/stockholm.md b/doc/content/xen-api/releases/stockholm.md
new file mode 100644
index 00000000000..18a52ec9c8a
--- /dev/null
+++ b/doc/content/xen-api/releases/stockholm.md
@@ -0,0 +1,7 @@
++++
+title = "Citrix Hypervisor 8.2"
+layout = "release"
+type = "xenapi"
+release = "stockholm"
+weight = 41
++++
diff --git a/doc/content/xen-api/releases/stockholm_psr.md b/doc/content/xen-api/releases/stockholm_psr.md
new file mode 100644
index 00000000000..17000a4c899
--- /dev/null
+++ b/doc/content/xen-api/releases/stockholm_psr.md
@@ -0,0 +1,7 @@
++++
+title = "Citrix Hypervisor 8.2 Hotfix 2"
+layout = "release"
+type = "xenapi"
+release = "stockholm_psr"
+weight = 40
++++
diff --git a/doc/content/xen-api/releases/symc.md b/doc/content/xen-api/releases/symc.md
new file mode 100644
index 00000000000..9d1d20e7314
--- /dev/null
+++ b/doc/content/xen-api/releases/symc.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 4.1.1"
+layout = "release"
+type = "xenapi"
+release = "symc"
+weight = 66
++++
diff --git a/doc/content/xen-api/releases/tampa.md b/doc/content/xen-api/releases/tampa.md
new file mode 100644
index 00000000000..b4182ff588b
--- /dev/null
+++ b/doc/content/xen-api/releases/tampa.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.1"
+layout = "release"
+type = "xenapi"
+release = "tampa"
+weight = 59
++++
diff --git a/doc/content/xen-api/releases/vgpu-productisation.md b/doc/content/xen-api/releases/vgpu-productisation.md
new file mode 100644
index 00000000000..76153a84ded
--- /dev/null
+++ b/doc/content/xen-api/releases/vgpu-productisation.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.2 SP1"
+layout = "release"
+type = "xenapi"
+release = "vgpu-productisation"
+weight = 56
++++
diff --git a/doc/content/xen-api/releases/vgpu-tech-preview.md b/doc/content/xen-api/releases/vgpu-tech-preview.md
new file mode 100644
index 00000000000..e3d7dd13b32
--- /dev/null
+++ b/doc/content/xen-api/releases/vgpu-tech-preview.md
@@ -0,0 +1,7 @@
++++
+title = "XenServer 6.2 SP1 Tech-Preview"
+layout = "release"
+type = "xenapi"
+release = "vgpu-tech-preview"
+weight = 57
++++
diff --git a/doc/content/xen-api/topics/_index.md b/doc/content/xen-api/topics/_index.md
new file mode 100644
index 00000000000..397bcf171a4
--- /dev/null
+++ b/doc/content/xen-api/topics/_index.md
@@ -0,0 +1,6 @@
++++
+title = "Topics"
+weight = 200
++++
+
+{{% children %}}
diff --git a/doc/content/xen-api/topics/consoles.md b/doc/content/xen-api/topics/consoles.md
new file mode 100644
index 00000000000..e478791320e
--- /dev/null
+++ b/doc/content/xen-api/topics/consoles.md
@@ -0,0 +1,96 @@
+---
+title: VM consoles
+layout: default
+---
+
+Most XenAPI graphical interfaces will want to gain access to the VM consoles, in order to render them to the user as if they were physical machines. There are several types of consoles available, depending on the type of guest or if the physical host console is being accessed:
+
+Types of consoles
+=================
+
+|Operating System|Text|Graphical|Optimized graphical|
+|:---------------|:---|:--------|:------------------|
+|Windows|No|VNC, using an API call|RDP, directly from guest|
+|Linux|Yes, through VNC and an API call|No|VNC, directly from guest|
+|Physical Host|Yes, through VNC and an API call|No|No|
+
+Hardware-assisted VMs, such as Windows, directly provide a graphical console over VNC. There is no text-based console, and guest networking is not necessary to use the graphical console. Once guest networking has been established, it is more efficient to setup Remote Desktop Access and use an RDP client to connect directly (this must be done outside of the XenAPI).
+
+Paravirtual VMs, such as Linux guests, provide a native text console directly. XenServer provides a utility (called `vncterm`) to convert this text-based console into a graphical VNC representation. Guest networking is not necessary for this console to function. As with Windows above, Linux distributions often configure VNC within the guest, and directly connect to it over a guest network interface.
+
+The physical host console is only available as a `vt100` console, which is exposed through the XenAPI as a VNC console by using `vncterm` in the control domain.
+
+RFB (Remote Framebuffer) is the protocol which underlies VNC, specified in [The RFB Protocol](http://www.realvnc.com/docs/rfbproto.pdf). Third-party developers are expected to provide their own VNC viewers, and many freely available implementations can be adapted for this purpose. RFB 3.3 is the minimum version which viewers must support.
+
+Retrieving VNC consoles using the API
+=====================================
+
+VNC consoles are retrieved using a special URL passed through to the host agent. The sequence of API calls is as follows:
+
+1. Client to Master/443: XML-RPC: `Session.login_with_password()`.
+
+2. Master/443 to Client: Returns a session reference to be used with subsequent calls.
+
+3. Client to Master/443: XML-RPC: `VM.get_by_name_label()`.
+
+4. Master/443 to Client: Returns a reference to a particular VM (or the "control domain" if you want to retrieve the physical host console).
+
+5. Client to Master/443: XML-RPC: `VM.get_consoles()`.
+
+6. Master/443 to Client: Returns a list of console objects associated with the VM.
+
+7. Client to Master/443: XML-RPC: `VM.get_location()`.
+
+8. Returns a URI describing where the requested console is located. The URIs are of the form: `https://192.168.0.1/console?ref=OpaqueRef:c038533a-af99-a0ff-9095-c1159f2dc6a0`.
+
+9. Client to 192.168.0.1: HTTP CONNECT "/console?ref=(...)"
+
+The final HTTP CONNECT is slightly non-standard since the HTTP/1.1 RFC specifies that it should only be a host and a port, rather than a URL. Once the HTTP connect is complete, the connection can subsequently directly be used as a VNC server without any further HTTP protocol action.
+
+This scheme requires direct access from the client to the control domain's IP, and will not work correctly if there are Network Address Translation (NAT) devices blocking such connectivity. You can use the CLI to retrieve the console URI from the client and perform a connectivity check.
+
+Retrieve the VM UUID by running:
+
+```sh
+$ VM=$(xe vm-list params=uuid --minimal name-label=<name>)
+```
+
+Retrieve the console information:
+
+```sh
+$ xe console-list vm-uuid=$VM
+uuid ( RO) : 8013b937-ff7e-60d1-ecd8-e52d66c5879e
+ vm-uuid ( RO): 2d7c558a-8f03-b1d0-e813-cbe7adfa534c
+ vm-name-label ( RO): 6
+ protocol ( RO): RFB
+ location ( RO): https://10.80.228.30/console?uuid=8013b937-ff7e-60d1-ecd8-e52d66c5879e
+```
+
+Use command-line utilities like `ping` to test connectivity to the IP address provided in the `location` field.
+
+Disabling VNC forwarding for Linux VM
+=====================================
+
+When creating and destroying Linux VMs, the host agent automatically manages the `vncterm` processes which convert the text console into VNC. Advanced users who wish to directly access the text console can disable VNC forwarding for that VM. The text console can then only be accessed directly from the control domain directly, and graphical interfaces such as XenCenter will not be able to render a console for that VM.
+
+Before starting the guest, set the following parameter on the VM record:
+
+```sh
+$ xe vm-param-set uuid=$VM other-config:disable_pv_vnc=1
+```
+
+Start the VM.
+
+Use the CLI to retrieve the underlying domain ID of the VM with:
+
+```sh
+$ DOMID=$(xe vm-list params=dom-id uuid=$VM --minimal)
+```
+
+On the host console, connect to the text console directly by:
+
+```sh
+$ /usr/lib/xen/bin/xenconsole $DOMID
+```
+
+This configuration is an advanced procedure, and we do not recommend that the text console is directly used for heavy I/O operations. Instead, connect to the guest over SSH or some other network-based connection mechanism.
diff --git a/doc/content/xen-api/topics/guest-agents.md b/doc/content/xen-api/topics/guest-agents.md
new file mode 100644
index 00000000000..0a745db3478
--- /dev/null
+++ b/doc/content/xen-api/topics/guest-agents.md
@@ -0,0 +1,26 @@
+---
+title: Guest agents
+layout: default
+---
+
+"Guest agents" are special programs which run inside VMs which can be controlled
+via the XenAPI.
+
+One communication method between XenAPI clients and these guest agents is via Xenstore.
+
+Adding Xenstore entries to VMs
+------------------------------
+
+Developers may wish to install guest agents into VMs which take special action based on the type of the VM. In order to communicate this information into the guest, a special Xenstore name-space known as `vm-data` is available which is populated at VM creation time. It is populated from the `xenstore-data` map in the VM record.
+
+Set the `xenstore-data` parameter in the VM record:
+
+    xe vm-param-set uuid=<vm uuid> xenstore-data:vm-data/foo=bar
+
+Start the VM.
+
+If it is a Linux-based VM, install the COMPANY\_TOOLS and use `xenstore-read` to verify that the node exists in Xenstore.
+
+> **Note**
+>
+> Only prefixes beginning with `vm-data` are permitted, and anything not in this name-space will be silently ignored when starting the VM.
diff --git a/doc/content/xen-api/topics/importexport.md b/doc/content/xen-api/topics/importexport.md
new file mode 100644
index 00000000000..e434c2ae3cd
--- /dev/null
+++ b/doc/content/xen-api/topics/importexport.md
@@ -0,0 +1,212 @@
+---
+title: VM import/export
+layout: default
+---
+
+VMs can be exported to a file and later imported to any Xapi host. The export
+protocol is a simple HTTP(S) GET, which should be sent to the Pool master.
+Authorization is either via a pre-created `session_id` or by HTTP basic
+authentication (particularly useful on the command-line).
+The VM to export is specified either by UUID or by reference. To keep track of
+the export, a task can be created and passed in using its reference. Note that
+Xapi may send an HTTP redirect if a different host has better access to the
+disk data.
+
+The following arguments are passed as URI query parameters or HTTP cookies:
+
+Argument | Description
+----------------|---------------------------------------------------------
+session_id | the reference of the session being used to authenticate; required only when not using HTTP basic authentication
+task_id | the reference of the task object with which to keep track of the operation; optional, required only if you have created a task object to keep track of the export
+ref | the reference of the VM; required only if not using the UUID
+uuid | the UUID of the VM; required only if not using the reference
+use_compression | an optional boolean "true" or "false" (defaulting to "false"). If "true" then the output will be gzip-compressed before transmission.
+
+
+For example, using the Linux command line tool cURL:
+
+```sh
+$ curl http://root:foo@myxenserver1/export?uuid=<vm_uuid> -o exportfile
+```
+
+will export the specified VM to the file `exportfile`.
+
+To export just the metadata, use the URI `http://server/export_metadata`.
+
+The import protocol is similar, using HTTP(S) PUT. The `session_id` and `task_id` arguments are as for the export. The `ref` and `uuid` are not used; a new reference and uuid will be generated for the VM. There are some additional parameters:
+
+Argument | Description
+------------|---------------------------------------------------------
+restore | if `true`, the import is treated as replacing the original VM - the implication of this currently is that the MAC addresses on the VIFs are exactly as the export was, which will lead to conflicts if the original VM is still being run.
+force | if `true`, any checksum failures will be ignored (the default is to destroy the VM if a checksum error is detected)
+sr_id | the reference of an SR into which the VM should be imported. The default behavior is to import into the `Pool.default_SR`
+
+Note there is no need to specify whether the export is compressed, as Xapi
+will automatically detect and decompress gzip-encoded streams.
+
+For example, again using cURL:
+
+```sh
+curl -T exportfile http://root:foo@myxenserver2/import
+```
+
+will import the VM to the default SR on the server.
+
+> **Note**
+>
+> Note that if no default SR has been set, and no `sr_id` is specified, the error message `DEFAULT_SR_NOT_FOUND` is returned.
+
+Another example:
+
+```sh
+curl -T exportfile http://root:foo@myxenserver2/import?sr_id=<sr_ref>
+```
+
+will import the VM to the specified SR on the server.
+
+To import just the metadata, use the URI `http://server/import_metadata`
+
+Legacy VM Import Format
+=======================
+
+This section describes the legacy VM import/export format and is for historical
+interest only. It should be updated to describe the current format, see
+[issue 64](https://github.com/xapi-project/xapi-project.github.io/issues/64)
+
+
+Xapi supports a human-readable legacy VM input format called XVA. This section describes the syntax and structure of XVA.
+
+An XVA consists of a directory containing XML metadata and a set of disk images. A VM represented by an XVA is not intended to be directly executable. Data within an XVA package is compressed and intended for either archiving on permanent storage or for being transmitted to a VM server - such as a XenServer host - where it can be decompressed and executed.
+
+XVA is a hypervisor-neutral packaging format; it should be possible to create simple tools to instantiate an XVA VM on any other platform. XVA does not specify any particular runtime format; for example disks may be instantiated as file images, LVM volumes, QCoW images, VMDK or VHD images. An XVA VM may be instantiated any number of times, each instantiation may have a different runtime format.
+
+XVA does not:
+
+- specify any particular serialization or transport format
+
+- provide any mechanism for customizing VMs (or templates) on install
+
+- address how a VM may be upgraded post-install
+
+- define how multiple VMs, acting as an appliance, may communicate
+
+These issues are all addressed by the related Open Virtual Appliance specification.
+
+An XVA is a directory containing, at a minimum, a file called `ova.xml`. This file describes the VM contained within the XVA and is described in Section 3.2. Disks are stored within sub-directories and are referenced from the ova.xml. The format of disk data is described later in Section 3.3.
+
+The following terms will be used in the rest of the chapter:
+
+- HVM: a mode in which unmodified OS kernels run with the help of virtualization support in the hardware.
+
+- PV: a mode in which specially modified "paravirtualized" kernels run explicitly on top of a hypervisor without requiring hardware support for virtualization.
+
+The "ova.xml" file contains the following elements:
+
+```xml
+<appliance version="0.1">
+```
+
+The number in the attribute "version" indicates the version of this specification to which the XVA is constructed; in this case version 0.1. Inside the `<appliance>` there is exactly one `<vm>`: (in the OVA specification, multiple `<vm>`s are permitted)
+
+```xml
+<vm name="...">
+```
+
+Each `<vm>` element describes one VM. The "name" attribute is for future internal use only and must be unique within the ova.xml file. The "name" attribute is permitted to be any valid UTF-8 string. Inside each `<vm>` tag are the following compulsory elements:
+
+```xml
+<label>... text ...</label>
+```
+
+A short name for the VM to be displayed in a UI.
+
+```xml
+<shortdesc> ... description ... </shortdesc>
+```
+
+A description for the VM to be displayed in the UI. Note that for both `<label>` and `<shortdesc>` contents, leading and trailing whitespace will be ignored.
+
+```xml
+<config mem_set="268435456" vcpus="1"/>
+```
+
+The `<config>` element has attributes which describe the amount of memory in bytes (`mem_set`) and number of CPUs (VCPUs) the VM should have.
+
+Each `<vm>` has zero or more `<vbd>` elements representing block devices which look like the following:
+
+```xml
+<vbd device="sda" function="root" mode="w" vdi="vdi_sda"/>
+```
+
+The attributes have the following meanings:
+
+* `device`: name of the physical device to expose to the VM. For linux guests
+ we use "sd[a-z]" and for windows guests we use "hd[a-d]".
+* `function`: if marked as "root", this disk will be used to boot the guest.
+ (NB this does not imply the existence of the Linux root i.e. / filesystem)
+ Only one device should be marked as "root". See Section 3.4 describing VM
+ booting. Any other string is ignored.
+* `mode`: either "w" or "ro" if the device is to be read/write or read-only
+* `vdi`: the name of the disk image (represented by a `` element) to which
+ this block device is connected
+
+Each `<vm>` may have an optional `<hacks>` section like the following:
+
+```xml
+<hacks is_hvm="false" kernel_boot_cmdline="root=/dev/sda1 ro"/>
+```
+
+The `<hacks>` element will be removed in future. The attribute `is_hvm` is
+either `true` or `false`, depending on whether the VM should be booted in HVM or not.
+The `kernel_boot_cmdline` contains additional kernel commandline arguments when
+booting a guest using pygrub.
+
+In addition to a `<hacks>` element, the `<vm>` will contain zero or more
+`<vdi>` elements like the following:
+
+```xml
+<vdi name="vdi_sda" size="10737418240" source="file://sda" type="dir-gzipped-chunks"/>
+```
+
+Each `<vdi>` corresponds to a disk image. The attributes have the following meanings:
+
+* `name`: name of the VDI, referenced by the `vdi` attribute of `<vbd>` elements.
+ Any valid UTF-8 string is permitted.
+* `size`: size of the required image in bytes
+* `source`: a URI describing where to find the data for the image, only
+ file:// URIs are currently permitted and must describe paths relative to the
+ directory containing the ova.xml
+* `type`: describes the format of the disk data
+
+A single disk image encoding is specified in which has type "dir-gzipped-chunks": Each image is represented by a directory containing a sequence of files as follows:
+
+```sh
+-rw-r--r-- 1 dscott xendev 458286013 Sep 18 09:51 chunk000000000.gz
+-rw-r--r-- 1 dscott xendev 422271283 Sep 18 09:52 chunk000000001.gz
+-rw-r--r-- 1 dscott xendev 395914244 Sep 18 09:53 chunk000000002.gz
+-rw-r--r-- 1 dscott xendev 9452401 Sep 18 09:53 chunk000000003.gz
+-rw-r--r-- 1 dscott xendev 1096066 Sep 18 09:53 chunk000000004.gz
+-rw-r--r-- 1 dscott xendev 971976 Sep 18 09:53 chunk000000005.gz
+-rw-r--r-- 1 dscott xendev 971976 Sep 18 09:53 chunk000000006.gz
+-rw-r--r-- 1 dscott xendev 971976 Sep 18 09:53 chunk000000007.gz
+-rw-r--r-- 1 dscott xendev 573930 Sep 18 09:53 chunk000000008.gz
+```
+
+Each file (named "chunkXXXXXXXXX.gz") is a gzipped file containing exactly 1e9 bytes (1GB, not 1GiB) of raw block data. The small size was chosen to be safely under the maximum file size limits of several filesystems. If the files are gunzipped and then concatenated together, the original image is recovered.
+
+Because the import and export of VMs can take some time to complete, an
+asynchronous HTTP interface to the import and export operations is
+provided. To perform an export using the XenServer API, construct
+an HTTP GET call providing a valid session ID, task ID and VM UUID, as
+shown in the following pseudo code:
+
+    task = Task.create()
+    result = HTTP.get(
+      server, 80, "/export?session_id=<session>&task_id=<task>&ref=<vm>");
+
+For the import operation, use an HTTP PUT call as demonstrated in the
+following pseudo code:
+
+    task = Task.create()
+    result = HTTP.put(
+      server, 80, "/import?session_id=<session>&task_id=<task>&ref=<vm>");
diff --git a/doc/content/xen-api/topics/memory.md b/doc/content/xen-api/topics/memory.md
new file mode 100644
index 00000000000..b69baad43af
--- /dev/null
+++ b/doc/content/xen-api/topics/memory.md
@@ -0,0 +1,23 @@
+---
+title: Memory
+layout: xenapi
+xenapi_tag: memory
+---
+
+Memory is used for many things:
+
+- the hypervisor code: this is the Xen executable itself
+- the hypervisor heap: this is needed for per-domain structures and per-vCPU
+ structures
+- the crash kernel: this is needed to collect information after a host crash
+- domain RAM: this is the memory the VM believes it has
+- shadow memory: for HVM guests running on hosts without hardware assisted
+ paging (HAP) Xen uses shadow to optimise page table updates. For all guests
+ shadow is used during live migration for tracking the memory transfer.
+- video RAM for the virtual graphics card
+
+Some of these are constants (e.g. hypervisor code) while some depend on the VM
+configuration (e.g. domain RAM). Xapi calls the constants "host overhead" and
+the variables due to VM configuration as "VM overhead".
+These overheads are subtracted from free memory on the host when starting,
+resuming and migrating VMs.
diff --git a/doc/content/xen-api/topics/metrics.md b/doc/content/xen-api/topics/metrics.md
new file mode 100644
index 00000000000..f29a9e07982
--- /dev/null
+++ b/doc/content/xen-api/topics/metrics.md
@@ -0,0 +1,251 @@
+---
+title: Metrics
+layout: default
+---
+
+[xcp-rrdd](https://github.com/xapi-project/xen-api/tree/master/ocaml/xcp-rrdd)
+records statistics about the host and the VMs running on top.
+The metrics are stored persistently for long-term access and analysis of
+historical trends.
+Statistics are stored in [RRDs](http://oss.oetiker.ch/rrdtool/) (Round Robin
+Databases).
+RRDs are fixed-size structures that store time series with decreasing time
+resolution: the older the data point is, the longer the timespan it represents.
+'Data sources' are sampled every few seconds and points are added to
+the highest resolution RRD. Periodically each high-frequency RRD is
+'consolidated' (e.g. averaged) to produce a data point for a lower-frequency
+RRD.
+
+RRDs are resident on the host on which the VM is running, or the pool
+coordinator when the VM is not running.
+The RRDs are backed up every day.
+
+Granularity
+-----------
+
+Statistics are persisted for a maximum of one year, and are stored at
+different granularities.
+The average and most recent values are stored at intervals of:
+
+- five seconds for the past ten minutes
+- one minute for the past two hours
+- one hour for the past week
+- one day for the past year
+
+RRDs are saved to disk as uncompressed XML. The size of each RRD when
+written to disk ranges from 200KiB to approximately 1.2MiB when the RRD
+stores the full year of statistics.
+
+By default each RRD contains only averaged data to save storage space.
+To record minimum and maximum values in future RRDs, set the Pool-wide flag
+
+```sh
+xe pool-param-set uuid=<pool uuid> other-config:create_min_max_in_new_VM_RRDs=true
+```
+
+Downloading
+===========
+
+Statistics can be downloaded over HTTP in XML or JSON format, for example
+using `wget`.
+See [rrddump](http://oss.oetiker.ch/rrdtool/doc/rrddump.en.html) and
+[rrdxport](http://oss.oetiker.ch/rrdtool/doc/rrdxport.en.html) for information
+about the XML format.
+The JSON format has the same structure as the XML.
+Parameters are appended to the URL following a question mark (?) and separated
+by ampersands (&).
+HTTP authentication can take the form of a username and password or a session
+token in a URL parameter.
+
+Statistics may be downloaded all at once, including all history, or as
+deltas suitable for interactive graphing.
+
+Downloading statistics all at once
+----------------------------------
+
+To obtain a full dump of RRD data for a host use:
+
+```sh
+wget "http://hostname/host_rrd?session_id=OpaqueRef:43df3204-9360-c6ab-923e-41a8d19389ba"
+```
+
+where the session token has been fetched from the server using the API.
+
+For example, using Python's [XenAPI](https://pypi.org/project/XenAPI/) library:
+
+```python
+import XenAPI
+username = "root"
+password = "actual_password"
+url = "http://hostname"
+session = XenAPI.Session(url)
+session.xenapi.login_with_password(username, password, "1.0", "session_getter")
+session._session
+```
+
+A URL parameter is used to decide which format to return: XML is returned by
+default, adding the parameter `json` makes the server return JSON.
+Starting from xapi version 23.17.0, the server uses the HTTP header `Accept`
+to decide which format to return.
+When both formats are accepted, for example, using `*/*`, JSON is returned.
+Of interest are the clients wget and curl which use this accept header value,
+meaning that when using them the default behaviour will change and the accept
+header needs to be overridden to make the server return XML.
+The content type is provided in the response's headers in these newer versions.
+
+The XML RRD data is in the format used by rrdtool and looks like this:
+
+```xml
+<rrd>
+  <version>0003</version>
+  <step>5</step>
+  <lastupdate>1213616574</lastupdate>
+  <ds>
+    <name>memory_total_kib</name>
+    <type>GAUGE</type>
+    <minimal_heartbeat>300.0000</minimal_heartbeat>
+    <min>0.0</min>
+    <max>Infinity</max>
+    <last_ds>2070172</last_ds>
+    <value>9631315.6300</value>
+    <unknown_sec>0</unknown_sec>
+  </ds>
+  <rra>
+    <cf>AVERAGE</cf>
+    <pdp_per_row>1</pdp_per_row>
+    <params>
+      <xff>0.5000</xff>
+    </params>
+    <cdp_prep>
+      <ds>
+        <primary_value>0.0</primary_value>
+        <secondary_value>0.0</secondary_value>
+        <value>0.0</value>
+        <unknown_datapoints>0</unknown_datapoints>
+      </ds>
+      <!-- ...other dss - internal use only... -->
+    </cdp_prep>
+    <database>
+      <row>
+        <v>2070172.0000</v>
+        <v>1756408.0000</v>
+        <v>0.0</v>
+        <v>0.0</v>
+        <v>732.2130</v>
+        <v>0.0</v>
+        <v>782.9186</v>
+        <v>0.0</v>
+        <v>647.0431</v>
+        <v>0.0</v>
+        <v>0.0001</v>
+        <v>0.0268</v>
+        <v>0.0100</v>
+        <v>0.0</v>
+        <v>615.1072</v>
+      </row>
+      <!-- ... -->
+    </database>
+  </rra>
+  <!-- ... other archives ... -->
+</rrd>
+```
+
+To obtain a full dump of RRD data of a VM with uuid `x`:
+
+```sh
+wget "http://hostname/vm_rrd?session_id=<session>&uuid=x"
+```
+
+Note that it is quite expensive to download full RRDs as they contain
+lots of historical information. For interactive displays clients should
+download deltas instead.
+
+
+Downloading deltas
+------------------
+
+To obtain an update of all VM statistics on a host, the URL would be of
+the form:
+
+```sh
+wget "https://hostname/rrd_updates?session_id=<session>&start=<secondsago>"
+```
+
+This request returns data in an rrdtool `xport` style XML format, for every VM
+resident on the particular host that is being queried.
+To differentiate which column in the export is associated with which VM, the
+`legend` field is prefixed with the UUID of the VM.
+
+An example `rrd_updates` output:
+
+```xml
+<xport>
+  <meta>
+    <start>1213578000</start>
+    <step>3600</step>
+    <end>1213617600</end>
+    <rows>12</rows>
+    <columns>12</columns>
+    <legend>
+      <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+      <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+      <entry>AVERAGE:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+      <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+      <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+      <entry>MIN:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+      <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+      <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+      <entry>MAX:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+      <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu1</entry>
+      <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:cpu0</entry>
+      <entry>LAST:vm:ecd8d7a0-1be3-4d91-bd0e-4888c0e30ab3:memory</entry>
+    </legend>
+  </meta>
+  <data>
+    <row>
+      <t>1213617600</t>
+      <v>0.0</v>
+      <v>0.0282</v>
+      <v>209715200.0000</v>
+      <v>0.0</v>
+      <v>0.0201</v>
+      <v>209715200.0000</v>
+      <v>0.0</v>
+      <v>0.0445</v>
+      <v>209715200.0000</v>
+      <v>0.0</v>
+      <v>0.0243</v>
+      <v>209715200.0000</v>
+    </row>
+    <!-- ... -->
+  </data>
+</xport>
+```
+
+To obtain host updates too, use the query parameter `host=true`:
+
+```sh
+wget "http://hostname/rrd_updates?session_id=<session>&start=<secondsago>&host=true"
+```
+
+The step will decrease as the period decreases, which means that if you
+request statistics for a shorter time period you will get more detailed
+statistics.
+
+To download updates containing only the averages, or minimums or maximums,
+add the parameter `cf=AVERAGE|MIN|MAX` (note case is important) e.g.
+
+```sh
+wget "http://hostname/rrd_updates?session_id=<session>&start=0&cf=MAX"
+```
+
+To request a different update interval, add the parameter `interval=seconds` e.g.
+
+```sh
+wget "http://hostname/rrd_updates?session_id=<session>&start=0&interval=5"
+```
diff --git a/doc/content/xen-api/topics/snapshots.md b/doc/content/xen-api/topics/snapshots.md
new file mode 100644
index 00000000000..91b5969ce42
--- /dev/null
+++ b/doc/content/xen-api/topics/snapshots.md
@@ -0,0 +1,260 @@
+---
+title: Snapshots
+layout: xenapi
+xenapi_tag: snapshots
+---
+
+Snapshots represent the state of a VM, or a disk (VDI) at a point in time.
+They can be used for:
+
+- backups (hourly, daily, weekly etc)
+- experiments (take snapshot, try something, revert back again)
+- golden images (install OS, get it just right, clone it 1000s of times)
+
+Read more about [Snapshots: the High-Level Feature](../features/snapshots/snapshots.html).
+
+Taking a VDI snapshot
+=====================
+
+To take a snapshot of a single disk (VDI):
+
+```
+snapshot_vdi <- VDI.snapshot(session_id, vdi, driver_params)
+```
+
+where `vdi` is the reference to the disk to be snapshotted, and `driver_params`
+is a list of string pairs providing optional backend implementation-specific hints.
+The snapshot operation should be quick (i.e. it should never be implemented as
+a slow disk copy) and the resulting VDI will have
+
+Field name | Description
+---------------|------------------------------------------------------
+is_a_snapshot | a flag, set to true, indicating the disk is a snapshot
+snapshot_of | a reference to the disk the snapshot was created from
+snapshot_time | the time the snapshot was taken
+
+The resulting snapshot should be considered read-only. Depending on the backend
+implementation it may be technically possible to write to the snapshot, but clients
+must not do this. To create a writable disk from a snapshot, see "restoring from
+a snapshot" below.
+
+Note that the storage backend is free to implement this in different ways. We
+do not assume the presence of a .vhd-formatted storage repository. Clients
+must never assume anything about the backend implementation without checking
+first with the maintainers of the backend implementation.
+
+Restoring to a VDI snapshot
+===========================
+
+To restore from a VDI snapshot first
+
+```
+new_vdi <- VDI.clone(session_id, snapshot_vdi, driver_params)
+```
+
+where `snapshot_vdi` is a reference to the snapshot VDI, and `driver_params`
+is a list of string pairs providing optional backend implementation-specific hints.
+The clone operation should be quick (i.e. it should never be implemented as
+a slow disk copy) and the resulting VDI will have
+
+Field name | Description
+---------------|------------------------------------------------------------
+is_a_snapshot | a flag, set to false, indicating the disk is not a snapshot
+snapshot_of | an invalid reference
+snapshot_time | an invalid time
+
+The resulting disk is writable and can be used by the client as normal.
+
+Note that the "restored" VDI will have a different `VDI.uuid` and reference to
+the original VDI.
+
+Taking a VM snapshot
+====================
+
+A VM snapshot is a copy of the VM metadata and a snapshot of all the associated
+VDIs at around the same point in time. To take a VM snapshot:
+
+```
+snapshot_vm <- VM.snapshot(session_id, vm, new_name)
+```
+
+where `vm` is a reference to the existing VM and `new_name` will be the `name_label`
+of the resulting VM (snapshot) object. The resulting VM will have
+
+Field name | Description
+---------------|------------------------------------------------------
+is_a_snapshot | a flag, set to true, indicating the VM is a snapshot
+snapshot_of | a reference to the VM the snapshot was created from
+snapshot_time | the time the snapshot was taken
+
+Note that each disk is snapshotted one-by-one and not at the same time.
+
+Restoring to a VM snapshot
+==========================
+
+A VM snapshot can be reverted to a snapshot using
+
+```
+VM.revert(session_id, snapshot_ref)
+```
+
+where `snapshot_ref` is a reference to the snapshot VM. Each VDI associated with
+the VM before the snapshot will be destroyed and each VDI associated with the
+snapshot will be cloned (see "Reverting to a disk snapshot" above) and associated
+with the VM. The resulting VM will have
+
+Field name | Description
+---------------|----------------------------------------------------------
+is_a_snapshot | a flag, set to false, indicating the VM is not a snapshot
+snapshot_of | an invalid reference
+snapshot_time | an invalid time
+
+Note that the `VM.uuid` and reference are preserved, but the `VDI.uuid` and
+VDI references are not.
+
+Downloading a disk or snapshot
+==============================
+
+Disks can be downloaded in either raw or vhd format using an HTTP 1.0 GET
+request as follows:
+
+```
+GET /export_raw_vdi?session_id=%s&task_id=%s&vdi=%s&format=%s[&base=%s] HTTP/1.0\r\n
+Connection: close\r\n
+\r\n
+\r\n
+```
+
+where
+
+- `session_id` is a currently logged-in session
+- `task_id` is a `Task` reference which will be used to monitor the
+ progress of this task and receive errors from it
+- `vdi` is the reference of the `VDI` into which the data will be
+ imported
+- `format` is either `vhd` or `raw`
+- (optional) `base` is the reference of a `VDI` which has already been
+ exported and this export should only contain the blocks which have changed
+ since then.
+
+Note that the vhd format allows the disk to be sparse i.e. only contain allocated
+blocks. This helps reduce the size of the download.
+
+The xapi-project/xen-api repo has a
+[python download example](https://github.com/xapi-project/xen-api/blob/19afd3dfe8883814e525ce7ce39c8c959ce3c924/scripts/examples/python/exportimport.py#L32)
+
+Uploading a disk or snapshot
+=============================
+
+Disks can be uploaded in either raw or vhd format using an HTTP 1.0 PUT
+request as follows:
+
+```
+PUT /import_raw_vdi?session_id=%s&task_id=%s&vdi=%s&format=%s HTTP/1.0\r\n
+Connection: close\r\n
+\r\n
+\r\n
+```
+
+where
+
+- `session_id` is a currently logged-in session
+- `task_id` is a `Task` reference which will be used to monitor the
+ progress of this task and receive errors from it
+- `vdi` is the reference of the `VDI` into which the data will be
+ imported
+- `format` is either `vhd` or `raw`
+
+Note that you must create the disk (with the correct size) before importing
+data to it. The disk doesn't have to be empty, in fact if restoring from a
+series of incremental downloads it makes sense to upload them all to the
+same disk in order.
+
+Example: incremental backup with xe
+===================================
+
+This section will show how easy it is to build an incremental backup
+tool using these APIs. For simplicity we will use the `xe` commands
+rather than raw XMLRPC and HTTP.
+
+For a VDI with uuid $VDI, take a snapshot:
+
+```sh
+FULL=$(xe vdi-snapshot uuid=$VDI)
+```
+
+Next perform a full backup into a file "full.vhd", in vhd format:
+
+```sh
+xe vdi-export uuid=$FULL filename=full.vhd format=vhd --progress
+```
+
+If the SR was using the vhd format internally (this is the default)
+then the full backup will be sparse and will only contain blocks if they
+have been written to.
+
+After some time has passed and the VDI has been written to, take another
+snapshot:
+
+```sh
+DELTA=$(xe vdi-snapshot uuid=$VDI)
+```
+
+Now we can backup only the disk blocks which have changed between the original
+snapshot $FULL and the next snapshot $DELTA into a file called "delta.vhd":
+
+```sh
+xe vdi-export uuid=$DELTA filename=delta.vhd format=vhd base=$FULL --progress
+```
+
+We now have 2 files on the local system:
+
+- "full.vhd": a complete backup of the first snapshot
+- "delta.vhd": an incremental backup of the second snapshot, relative to
+ the first
+
+For example:
+
+```sh
+test $ ls -lh *.vhd
+-rw------- 1 dscott xendev 213M Aug 15 10:39 delta.vhd
+-rw------- 1 dscott xendev 8.0G Aug 15 10:39 full.vhd
+```
+
+To restore the original snapshot you must create an empty disk with the
+correct size. To find the size of a .vhd file use `qemu-img` as follows:
+
+```sh
+test $ qemu-img info delta.vhd
+image: delta.vhd
+file format: vpc
+virtual size: 24G (25769705472 bytes)
+disk size: 212M
+```
+
+Here the size is 25769705472 bytes.
+Create a fresh VDI in SR $SR to restore the backup as follows:
+
+```sh
+SIZE=25769705472
+RESTORE=$(xe vdi-create name-label=restored virtual-size=$SIZE sr-uuid=$SR type=user)
+```
+
+then import "full.vhd" into it:
+
+```sh
+xe vdi-import uuid=$RESTORE filename=full.vhd format=vhd --progress
+```
+
+Once "full.vhd" has been imported, the incremental backup can be restored
+on top:
+
+```sh
+xe vdi-import uuid=$RESTORE filename=delta.vhd format=vhd --progress
+```
+
+Note there is no need to supply a "base" parameter when importing; Xapi will
+treat the "vhd differencing disk" as a set of blocks and import them. It
+is up to you to check you are importing them to the right place.
+
+Now the VDI $RESTORE should have the same contents as $DELTA.
diff --git a/doc/content/xen-api/topics/udhcp.md b/doc/content/xen-api/topics/udhcp.md
new file mode 100644
index 00000000000..a126ed1624c
--- /dev/null
+++ b/doc/content/xen-api/topics/udhcp.md
@@ -0,0 +1,93 @@
+---
+title: API for configuring the udhcp server in Dom0
+layout: default
+---
+
+
+This API allows you to configure the DHCP service running on the Host
+Internal Management Network (HIMN). The API configures a udhcp daemon
+residing in Dom0 and alters the service configuration for any VM using
+the network.
+
+It should be noted, for this reason, that callers who modify the
+default configuration should be aware that their changes may have an
+adverse effect on other consumers of the HIMN.
+
+Version history
+---------------
+
+ Date State
+ ---- ----
+ 2013-3-15 Stable
+
+_Stable_: this API is considered stable and unlikely to change between
+software versions and between hotfixes.
+
+API description
+---------------
+
+The API for configuring the network is based on a series of other_config
+keys that can be set by the caller on the HIMN XAPI network object. Once
+any of the keys below have been set, the caller must ensure that any VIFs
+attached to the HIMN are removed, destroyed, created and plugged.
+
+ ip_begin
+
+The first IP address in the desired subnet that the caller wishes the
+DHCP service to use.
+
+ ip_end
+
+The last IP address in the desired subnet that the caller wishes the
+DHCP service to use.
+
+ netmask
+
+The subnet mask for each of the issued IP addresses.
+
+ ip_disable_gw
+
+A boolean key for disabling the DHCP server from returning a default
+gateway for VMs on the network. To disable returning the gateway address
+set the key to True.
+
+_Note_: By default, the DHCP server will issue a default gateway for
+those requesting an address. Setting this key may disrupt applications
+that require the default gateway for communicating with Dom0 and so
+should be used with care.
+
+
+
+Example code
+------------
+
+An example python extract of setting the config for the network:
+
+ def get_himn_ref():
+ networks = session.xenapi.network.get_all_records()
+ for ref, rec in networks.iteritems():
+ if 'is_host_internal_management_network' \
+ in rec['other_config']:
+ return ref
+
+ raise Exception("Error: unable to find HIMN.")
+
+
+ himn_ref = get_himn_ref()
+ other_config = session.xenapi.network.get_other_config(himn_ref)
+
+ other_config['ip_begin'] = "169.254.0.1"
+ other_config['ip_end'] = "169.254.255.254"
+ other_config['netmask'] = "255.255.0.0"
+
+ session.xenapi.network.set_other_config(himn_ref, other_config)
+
+
+An example for how to disable the server returning a default gateway:
+
+ himn_ref = get_himn_ref()
+ other_config = session.xenapi.network.get_other_config(himn_ref)
+
+ other_config['ip_disable_gw'] = True
+
+ session.xenapi.network.set_other_config(himn_ref, other_config)
diff --git a/doc/content/xen-api/topics/vm-lifecycle.md b/doc/content/xen-api/topics/vm-lifecycle.md
new file mode 100644
index 00000000000..4531acd07f7
--- /dev/null
+++ b/doc/content/xen-api/topics/vm-lifecycle.md
@@ -0,0 +1,23 @@
++++
+title = "VM Lifecycle"
++++
+
+```mermaid
+graph
+ halted-- start(paused) -->paused
+ halted-- start(not paused) -->running
+ running-- suspend -->suspended
+ suspended-- resume(not paused) -->running
+ suspended-- resume(paused) -->paused
+ suspended-- hard shutdown -->halted
+ paused-- unpause -->running
+ paused-- hard shutdown -->halted
+ running-- clean shutdown\n hard shutdown -->halted
+ running-- pause -->paused
+ halted-- destroy -->destroyed
+```
+
+The figure above shows the states that a VM can be in and the
+API calls that can be used to move the VM between these states.
+
+{{% children %}}
diff --git a/doc/content/xen-api/topics/xencenter.md b/doc/content/xen-api/topics/xencenter.md
new file mode 100644
index 00000000000..1ea3128fe16
--- /dev/null
+++ b/doc/content/xen-api/topics/xencenter.md
@@ -0,0 +1,32 @@
+---
+title: XenCenter
+layout: default
+---
+
+XenCenter uses some conventions on top of the XenAPI:
+
+Internationalization for SR names
+---------------------------------
+
+The SRs created at install time now have an `other_config` key indicating how their names may be internationalized.
+
+`other_config["i18n-key"]` may be one of
+
+- `local-hotplug-cd`
+
+- `local-hotplug-disk`
+
+- `local-storage`
+
+- `xenserver-tools`
+
+Additionally, `other_config["i18n-original-value-"]` gives the value of that field when the SR was created. If XenCenter sees a record where `SR.name_label` equals `other_config["i18n-original-value-name_label"]` (that is, the record has not changed since it was created during XenServer installation), then internationalization will be applied. In other words, XenCenter will disregard the current contents of that field, and instead use a value appropriate to the user's own language.
+
+If you change `SR.name_label` for your own purpose, then it no longer is the same as `other_config["i18n-original-value-name_label"]`. Therefore, XenCenter does not apply internationalization, and instead preserves your given name.
+
+Hiding objects from XenCenter
+-----------------------------
+
+Networks, PIFs, and VMs can be hidden from XenCenter by adding the key `HideFromXenCenter=true` to the `other_config` parameter for the object. This capability is intended for ISVs who know what they are doing, not general use by everyday users. For example, you might want to hide certain VMs because they are cloned VMs that shouldn't be used directly by general users in your environment.
+
+In XenCenter, hidden Networks, PIFs, and VMs can be made visible, using the View menu.
diff --git a/doc/content/xen-api/usage.md b/doc/content/xen-api/usage.md
new file mode 100644
index 00000000000..525bc29a485
--- /dev/null
+++ b/doc/content/xen-api/usage.md
@@ -0,0 +1,530 @@
++++
+title = "Using the API"
+weight = 50
++++
+
+This chapter describes how to use the XenServer Management API from real programs to manage XenServer Hosts and VMs. The chapter begins with a walk-through of a typical client application and demonstrates how the API can be used to perform common tasks. Example code fragments are given in python syntax but equivalent code in the other programming languages would look very similar. The language bindings themselves are discussed afterwards and the chapter finishes with walk-throughs of two complete examples.
+
+Anatomy of a typical application
+--------------------------------
+
+This section describes the structure of a typical application using the XenServer Management API. Most client applications begin by connecting to a XenServer Host and authenticating (e.g. with a username and password). Assuming the authentication succeeds, the server will create a "session" object and return a reference to the client. This reference will be passed as an argument to all future API calls. Once authenticated, the client may search for references to other useful objects (e.g. XenServer Hosts, VMs, etc.) and invoke operations on them. Operations may be invoked either synchronously or asynchronously; special task objects represent the state and progress of asynchronous operations. These application elements are all described in detail in the following sections.
+
+### Choosing a low-level transport
+
+API calls can be issued over two transports:
+
+- SSL-encrypted TCP on port 443 (https) over an IP network
+
+- plaintext over a local Unix domain socket: `/var/xapi/xapi`
+
+The SSL-encrypted TCP transport is used for all off-host traffic while the Unix domain socket can be used from services running directly on the XenServer Host itself. In the SSL-encrypted TCP transport, all API calls should be directed at the Resource Pool master; failure to do so will result in the error `HOST_IS_SLAVE`, which includes the IP address of the master as an error parameter.
+
+Because the master host of a pool can change, especially if HA is enabled on a pool, clients must implement the following steps to detect a master host change and connect to the new master as required:
+
+Subscribe to updates in the list of host servers, and maintain a current list of hosts in the pool
+
+If the connection to the pool master fails to respond, attempt to connect to all hosts in the list until one responds
+
+The first host to respond will return the `HOST_IS_SLAVE` error message, which contains the identity of the new pool master (unless of course the host is the new master)
+
+Connect to the new master
+
+> **Note**
+>
+> As a special-case, all messages sent through the Unix domain socket are transparently forwarded to the correct node.
+
+### Authentication and session handling
+
+The vast majority of API calls take a session reference as their first parameter; failure to supply a valid reference will result in a `SESSION_INVALID` error being returned. Acquire a session reference by supplying a username and password to the `login_with_password` function.
+
+> **Note**
+>
+> As a special-case, if this call is executed over the local Unix domain socket then the username and password are ignored and the call always succeeds.
+
+Every session has an associated "last active" timestamp which is updated on every API call. The server software currently has a built-in limit of 500 active sessions and will remove those with the oldest "last active" field if this limit is exceeded for a given `username` or `originator`. In addition all sessions whose "last active" field is older than 24 hours are also removed. Therefore it is important to:
+
+- Specify an appropriate `originator` when logging in; and
+
+- Remember to log out of active sessions to avoid leaking them; and
+
+- Be prepared to log in again to the server if a `SESSION_INVALID` error is caught.
+
+In the following Python fragment a connection is established over the Unix domain socket and a session is created:
+
+ import XenAPI
+
+ session = XenAPI.xapi_local()
+ try:
+ session.xenapi.login_with_password("root", "", "2.3", "My Widget v0.1")
+ ...
+ finally:
+ session.xenapi.session.logout()
+
+### Finding references to useful objects
+
+Once an application has authenticated the next step is to acquire references to objects in order to query their state or invoke operations on them. All objects have a set of "implicit" messages which include the following:
+
+- `get_by_name_label` : return a list of all objects of a particular class with a particular label;
+
+- `get_by_uuid` : return a single object named by its UUID;
+
+- `get_all` : return a set of references to all objects of a particular class; and
+
+- `get_all_records` : return a map of reference to records for each object of a particular class.
+
+For example, to list all hosts:
+
+ hosts = session.xenapi.host.get_all()
+
+To find all VMs with the name "my first VM":
+
+ vms = session.xenapi.VM.get_by_name_label('my first VM')
+
+> **Note**
+>
+> Object `name_label` fields are not guaranteed to be unique and so the `get_by_name_label` API call returns a set of references rather than a single reference.
+
+In addition to the methods of finding objects described above, most objects also contain references to other objects within fields. For example it is possible to find the set of VMs running on a particular host by calling:
+
+ vms = session.xenapi.host.get_resident_VMs(host)
+
+### Invoking synchronous operations on objects
+
+Once object references have been acquired, operations may be invoked on them. For example to start a VM:
+
+ session.xenapi.VM.start(vm, False, False)
+
+All API calls are by default synchronous and will not return until the operation has completed or failed. For example in the case of `VM.start` the call does not return until the VM has started booting.
+
+> **Note**
+>
+> When the `VM.start` call returns the VM will be booting. To determine when the booting has finished, wait for the in-guest agent to report internal statistics through the `VM_guest_metrics` object.
+
+### Using Tasks to manage asynchronous operations
+
+To simplify managing operations which take quite a long time (e.g. `VM.clone` and `VM.copy`) functions are available in two forms: synchronous (the default) and asynchronous. Each asynchronous function returns a reference to a task object which contains information about the in-progress operation including:
+
+- whether it is pending
+
+- whether it has succeeded or failed
+
+- progress (in the range 0-1)
+
+- the result or error code returned by the operation
+
+An application which wanted to track the progress of a `VM.clone` operation and display a progress bar would have code like the following:
+
+ vm = session.xenapi.VM.get_by_name_label('my vm')
+ task = session.xenapi.Async.VM.clone(vm)
+ while session.xenapi.task.get_status(task) == "pending":
+ progress = session.xenapi.task.get_progress(task)
+ update_progress_bar(progress)
+ time.sleep(1)
+ session.xenapi.task.destroy(task)
+
+> **Note**
+>
+> Note that a well-behaved client should remember to delete tasks created by asynchronous operations when it has finished reading the result or error. If the number of tasks exceeds a built-in threshold then the server will delete the oldest of the completed tasks.
+
+### Subscribing to and listening for events
+
+With the exception of the task and metrics classes, whenever an object is modified the server generates an event. Clients can subscribe to this event stream on a per-class basis and receive updates rather than resorting to frequent polling. Events come in three types:
+
+- `add` - generated when an object has been created;
+
+- `del` - generated immediately before an object is destroyed; and
+
+- `mod` - generated when an object's field has changed.
+
+Events also contain a monotonically increasing ID, the name of the class of object and a snapshot of the object state equivalent to the result of a `get_record()`.
+
+Clients register for events by calling `event.register()` with a list of class names or the special string "\*". Clients receive events by executing `event.next()` which blocks until events are available and returns the new events.
+
+> **Note**
+>
+> Since the queue of generated events on the server is of finite length a very slow client might fail to read the events fast enough; if this happens an `EVENTS_LOST` error is returned. Clients should be prepared to handle this by re-registering for events and checking that the condition they are waiting for hasn't become true while they were unregistered.
+
+The following python code fragment demonstrates how to print a summary of every event generated by a system: (similar code exists in `XenServer-SDK/XenServerPython/samples/watch-all-events.py`)
+
+ fmt = "%8s %20s %5s %s"
+ session.xenapi.event.register(["*"])
+ while True:
+ try:
+ for event in session.xenapi.event.next():
+ name = "(unknown)"
+ if "snapshot" in event.keys():
+ snapshot = event["snapshot"]
+ if "name_label" in snapshot.keys():
+ name = snapshot["name_label"]
+ print fmt % (event['id'], event['class'], event['operation'], name)
+ except XenAPI.Failure, e:
+ if e.details == [ "EVENTS_LOST" ]:
+ print "Caught EVENTS_LOST; should reregister"
+
+Language bindings
+-----------------
+
+### C
+
+The SDK includes the source to the C language binding in the directory `XenServer-SDK/libxenserver/src` together with a Makefile which compiles the binding into a library. Every API object is associated with a header file which contains declarations for all that object's API functions; for example the type definitions and functions required to invoke VM operations are all contained in `xen_vm.h`.
+
+**C binding dependencies**
+
+
+
+
+
+
+Platform supported:
+Linux
+
+
+Library:
+The language binding is generated as a libxenserver.so
that is linked by C programs.
+
+
+Dependencies:
+
+
+
+
+
+The following simple examples are included with the C bindings:
+
+- `test_vm_async_migrate`: demonstrates how to use asynchronous API calls to migrate running VMs from a slave host to the pool master.
+
+- `test_vm_ops`: demonstrates how to query the capabilities of a host, create a VM, attach a fresh blank disk image to the VM and then perform various powercycle operations;
+
+- `test_failures`: demonstrates how to translate error strings into enum\_xen\_api\_failure, and vice versa;
+
+- `test_event_handling`: demonstrates how to listen for events on a connection.
+
+- `test_enumerate`: demonstrates how to enumerate the various API objects.
+
+### C#
+
+The C\# bindings are contained within the directory `XenServer-SDK/XenServer.NET` and include project files suitable for building under Microsoft Visual Studio. Every API object is associated with one C\# file; for example the functions implementing the VM operations are contained within the file `VM.cs`.
+
+**C\# binding dependencies**
+
+
+
+
+
+
+Platform supported:
+Windows with .NET version 4.5
+
+
+Library:
+The language binding is generated as a Dynamic Link Library XenServer.dll
that is linked by C# programs.
+
+
+Dependencies:
+CookComputing.XMLRpcV2.dll
is needed for the XenServer.dll to be able to communicate with the xml-rpc server. We test with version 2.1.0.6 and recommend that you use this version, though others may work.
+
+
+
+
+Three examples are included with the C\# bindings in the directory `XenServer-SDK/XenServer.NET/samples` as separate projects of the `XenSdkSample.sln` solution:
+
+- `GetVariousRecords`: logs into a XenServer Host and displays information about hosts, storage and virtual machines;
+
+- `GetVmRecords`: logs into a XenServer Host and lists all the VM records;
+
+- `VmPowerStates`: logs into a XenServer Host, finds a VM and takes it through the various power states. Requires a shut-down VM to be already installed.
+
+### Java
+
+The Java bindings are contained within the directory `XenServer-SDK/XenServerJava` and include project files suitable for building under Microsoft Visual Studio. Every API object is associated with one Java file; for example the functions implementing the VM operations are contained within the file `VM.java`.
+
+**Java binding dependencies**
+
+
+
+
+
+
+Platform supported:
+Linux and Windows
+
+
+Library:
+The language binding is generated as a Java Archive file xenserver-PRODUCT_VERSION.jar
that is linked by Java programs.
+
+
+Dependencies:
+
+xmlrpc-client-3.1.jar is needed for the xenserver.jar to be able to communicate with the xml-rpc server.
+ws-commons-util-1.0.2.jar is needed to run the examples.
+
+
+
+
+
+Running the main file `XenServer-SDK/XenServerJava/samples/RunTests.java` will run a series of examples included in the same directory:
+
+- `AddNetwork`: Adds a new internal network not attached to any NICs;
+
+- `SessionReuse`: Demonstrates how a Session object can be shared between multiple Connections;
+
+- `AsyncVMCreate`: Makes asynchronously a new VM from a built-in template, starts and stops it;
+
+- `VdiAndSrOps`: Performs various SR and VDI tests, including creating a dummy SR;
+
+- `CreateVM`: Creates a VM on the default SR with a network and DVD drive;
+
+- `DeprecatedMethod`: Tests a warning is displayed when a deprecated API method is called;
+
+- `GetAllRecordsOfAllTypes`: Retrieves all the records for all types of objects;
+
+- `SharedStorage`: Creates a shared NFS SR;
+
+- `StartAllVMs`: Connects to a host and tries to start each VM on it.
+
+### PowerShell
+
+The PowerShell bindings are contained within the directory `XenServer-SDK/XenServerPowerShell`. We provide the PowerShell module `XenServerPSModule` and source code exposing the XenServer API as Windows PowerShell cmdlets.
+
+**PowerShell binding dependencies**
+
+
+
+
+
+
+Platform supported:
+Windows with .NET Framework 4.5 and PowerShell v4.0
+
+
+Library:
+XenServerPSModule
+
+
+Dependencies:
+CookComputing.XMLRpcV2.dll
is needed to be able to communicate with the xml-rpc server. We test with version 2.1.0.6 and recommend that you use this version, though others may work.
+
+
+
+
+These example scripts are included with the PowerShell bindings in the directory `XenServer-SDK/XenServerPowerShell/samples`:
+
+- `AutomatedTestCore.ps1`: demonstrates how to log into a XenServer host, create a storage repository and a VM, and then perform various powercycle operations;
+
+- `HttpTest.ps1`: demonstrates how to log into a XenServer host, create a VM, and then perform operations such as VM importing and exporting, patch upload, and retrieval of performance statistics.
+
+### Python
+
+The python bindings are contained within a single file: `XenServer-SDK/XenServerPython/XenAPI.py`.
+
+**Python binding dependencies**
+
+|:--|:--|
+|Platform supported:|Linux|
+|Library:|XenAPI.py|
+|Dependencies:|None|
+
+The SDK includes 8 python examples:
+
+- `fixpbds.py` - reconfigures the settings used to access shared storage;
+
+- `install.py` - installs a Debian VM, connects it to a network, starts it up and waits for it to report its IP address;
+
+- `license.py` - uploads a fresh license to a XenServer Host;
+
+- `permute.py` - selects a set of VMs and uses XenMotion to move them simultaneously between hosts;
+
+- `powercycle.py` - selects a set of VMs and powercycles them;
+
+- `shell.py` - a simple interactive shell for testing;
+
+- `vm_start_async.py` - demonstrates how to invoke operations asynchronously;
+
+- `watch-all-events.py` - registers for all events and prints details when they occur.
+
+### Command Line Interface (CLI)
+
+Besides using raw XML-RPC or one of the supplied language bindings, third-party software developers may integrate with XenServer Hosts by using the XE command line interface `xe`. The xe CLI is installed by default on XenServer hosts; a stand-alone remote CLI is also available for Linux. On Windows, the `xe.exe` CLI executable is installed along with XenCenter.
+
+**CLI dependencies**
+
+|:--|:--|
+|Platform supported:|Linux and Windows|
+|Library:|None|
+|Binary:|xe (xe.exe on Windows)|
+|Dependencies:|None|
+
+The CLI allows almost every API call to be directly invoked from a script or other program, silently taking care of the required session management.
+The XE CLI syntax and capabilities are described in detail in the [XenServer Administrator's Guide](https://docs.citrix.com/en-us/citrix-hypervisor/command-line-interface.html). For additional resources and examples, visit the [Citrix Knowledge Center](http://support.citrix.com).
+
+> **Note**
+>
+> When running the CLI from a XenServer Host console, tab-completion of both command names and arguments is available.
+
+Complete application examples
+-----------------------------
+
+This section describes two complete examples of real programs using the API.
+
+### Simultaneously migrating VMs using XenMotion
+
+This python example (contained in `XenServer-SDK/XenServerPython/samples/permute.py`) demonstrates how to use XenMotion to move VMs simultaneously between hosts in a Resource Pool. The example makes use of asynchronous API calls and shows how to wait for a set of tasks to complete.
+
+The program begins with some standard boilerplate and imports the API bindings module
+
+ import sys, time
+ import XenAPI
+
+Next the commandline arguments containing a server URL, username, password and a number of iterations are parsed. The username and password are used to establish a session which is passed to the function `main`, which is called multiple times in a loop. Note the use of `try: finally:` to make sure the program logs out of its session at the end.
+
+ if __name__ == "__main__":
+ if len(sys.argv) <> 5:
+ print "Usage:"
+ print sys.argv[0], " "
+ sys.exit(1)
+ url = sys.argv[1]
+ username = sys.argv[2]
+ password = sys.argv[3]
+ iterations = int(sys.argv[4])
+ # First acquire a valid session by logging in:
+ session = XenAPI.Session(url)
+ session.xenapi.login_with_password(username, password, "2.3",
+ "Example migration-demo v0.1")
+ try:
+ for i in range(iterations):
+ main(session, i)
+ finally:
+ session.xenapi.session.logout()
+
+The `main` function examines each running VM in the system, taking care to filter out *control domains* (which are part of the system and not controllable by the user). A list of running VMs and their current hosts is constructed.
+
+ def main(session, iteration):
+ # Find a non-template VM object
+ all = session.xenapi.VM.get_all()
+ vms = []
+ hosts = []
+ for vm in all:
+ record = session.xenapi.VM.get_record(vm)
+ if not(record["is_a_template"]) and \
+ not(record["is_control_domain"]) and \
+ record["power_state"] == "Running":
+ vms.append(vm)
+ hosts.append(record["resident_on"])
+ print "%d: Found %d suitable running VMs" % (iteration, len(vms))
+
+Next the list of hosts is rotated:
+
+ # use a rotation as a permutation
+ hosts = [hosts[-1]] + hosts[:(len(hosts)-1)]
+
+Each VM is then moved using XenMotion to the new host under this rotation (i.e. a VM running on host at position 2 in the list will be moved to the host at position 1 in the list etc.) In order to execute each of the movements in parallel, the asynchronous version of the `VM.pool_migrate` is used and a list of task references constructed. Note the `live` flag passed to the `VM.pool_migrate`; this causes the VMs to be moved while they are still running.
+
+ tasks = []
+ for i in range(0, len(vms)):
+ vm = vms[i]
+ host = hosts[i]
+ task = session.xenapi.Async.VM.pool_migrate(vm, host, { "live": "true" })
+ tasks.append(task)
+
+The list of tasks is then polled for completion:
+
+ finished = False
+ records = {}
+ while not(finished):
+ finished = True
+ for task in tasks:
+ record = session.xenapi.task.get_record(task)
+ records[task] = record
+ if record["status"] == "pending":
+ finished = False
+ time.sleep(1)
+
+Once all tasks have left the *pending* state (i.e. they have successfully completed, failed or been cancelled) the tasks are polled once more to see if they all succeeded:
+
+ allok = True
+ for task in tasks:
+ record = records[task]
+ if record["status"] <> "success":
+ allok = False
+
+If any one of the tasks failed then details are printed, an exception is raised and the task objects left around for further inspection. If all tasks succeeded then the task objects are destroyed and the function returns.
+
+ if not(allok):
+ print "One of the tasks didn't succeed at", \
+ time.strftime("%F:%HT%M:%SZ", time.gmtime())
+ idx = 0
+ for task in tasks:
+ record = records[task]
+ vm_name = session.xenapi.VM.get_name_label(vms[idx])
+ host_name = session.xenapi.host.get_name_label(hosts[idx])
+ print "%s : %12s %s -> %s [ status: %s; result = %s; error = %s ]" % \
+ (record["uuid"], record["name_label"], vm_name, host_name, \
+ record["status"], record["result"], repr(record["error_info"]))
+ idx = idx + 1
+ raise "Task failed"
+ else:
+ for task in tasks:
+ session.xenapi.task.destroy(task)
+
+### Cloning a VM using the XE CLI
+
+This example is a `bash` script which uses the XE CLI to clone a VM taking care to shut it down first if it is powered on.
+
+The example begins with some boilerplate which first checks if the environment variable `XE` has been set: if it has it assumes that it points to the full path of the CLI, else it is assumed that the XE CLI is on the current path. Next the script prompts the user for a server name, username and password:
+
+ # Allow the path to the 'xe' binary to be overridden by the XE environment variable
+ if [ -z "${XE}" ]; then
+ XE=xe
+ fi
+
+ if [ ! -e "${HOME}/.xe" ]; then
+ read -p "Server name: " SERVER
+ read -p "Username: " USERNAME
+ read -p "Password: " PASSWORD
+ XE="${XE} -s ${SERVER} -u ${USERNAME} -pw ${PASSWORD}"
+ fi
+
+Next the script checks its commandline arguments. It requires exactly one: the UUID of the VM which is to be cloned:
+
+ # Check if there's a VM by the uuid specified
+ ${XE} vm-list params=uuid | grep -q " ${vmuuid}$"
+ if [ $? -ne 0 ]; then
+ echo "error: no vm uuid \"${vmuuid}\" found"
+ exit 2
+ fi
+
+The script then checks the power state of the VM and if it is running, it attempts a clean shutdown. The event system is used to wait for the VM to enter state "Halted".
+
+> **Note**
+>
+> The XE CLI supports a command-line argument `--minimal` which causes it to print its output without excess whitespace or formatting, ideal for use from scripts. If multiple values are returned they are comma-separated.
+
+ # Check the power state of the vm
+ name=$(${XE} vm-list uuid=${vmuuid} params=name-label --minimal)
+ state=$(${XE} vm-list uuid=${vmuuid} params=power-state --minimal)
+ wasrunning=0
+
+ # If the VM state is running, we shutdown the vm first
+ if [ "${state}" = "running" ]; then
+ ${XE} vm-shutdown uuid=${vmuuid}
+ ${XE} event-wait class=vm power-state=halted uuid=${vmuuid}
+ wasrunning=1
+ fi
+
+The VM is then cloned and the new VM has its `name_label` set to `cloned_vm`.
+
+ # Clone the VM
+ newuuid=$(${XE} vm-clone uuid=${vmuuid} new-name-label=cloned_vm)
+
+Finally, if the original VM had been running and was shutdown, both it and the new VM are started.
+
+ # If the VM state was running before cloning, we start it again
+ # along with the new VM.
+ if [ "$wasrunning" -eq 1 ]; then
+ ${XE} vm-start uuid=${vmuuid}
+ ${XE} vm-start uuid=${newuuid}
+ fi
diff --git a/doc/data/release_info.json b/doc/data/release_info.json
new file mode 100644
index 00000000000..0f94612bbe4
--- /dev/null
+++ b/doc/data/release_info.json
@@ -0,0 +1 @@
+{"rio":[{"transition":"published class","name":"PBD","log":"The physical block devices through which hosts access SRs"},{"transition":"published class","name":"PIF","log":"A physical network interface (note separate VLANs are represented as several PIFs)"},{"transition":"published class","name":"PIF_metrics","log":"The metrics associated with a physical network interface"},{"transition":"published class","name":"SM","log":"A storage manager plugin"},{"transition":"published class","name":"SR","log":"A storage repository"},{"transition":"published class","name":"VBD","log":"A virtual block device"},{"transition":"published class","name":"VBD_metrics","log":"The metrics associated with a virtual block device"},{"transition":"published class","name":"VDI","log":"A virtual disk image"},{"transition":"published class","name":"VIF","log":"A virtual network interface"},{"transition":"published class","name":"VIF_metrics","log":"The metrics associated with a virtual network device"},{"transition":"published class","name":"VM","log":"A virtual machine (or 'guest')."},{"transition":"published class","name":"VM_guest_metrics","log":"The metrics reported by the guest (as opposed to inferred from outside)"},{"transition":"published class","name":"VM_metrics","log":"The metrics associated with a VM"},{"transition":"published class","name":"console","log":"A console"},{"transition":"published class","name":"crashdump","log":"A VM crashdump"},{"transition":"published class","name":"event","log":"Asynchronous event registration and handling"},{"transition":"published class","name":"host","log":"A physical host"},{"transition":"published class","name":"host_cpu","log":"A physical CPU"},{"transition":"published class","name":"host_crashdump","log":"Represents a host crash dump"},{"transition":"published class","name":"host_metrics","log":"The metrics associated with a host"},{"transition":"published class","name":"host_patch","log":"Represents a patch stored on a 
server"},{"transition":"published class","name":"network","log":"A virtual network"},{"transition":"published class","name":"pool","log":"Pool-wide information"},{"transition":"published class","name":"session","log":"A session"},{"transition":"published class","name":"task","log":"A long-running asynchronous task"},{"transition":"published class","name":"user","log":"A user of the system"},{"transition":"published field","name":"Bond.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"Cluster.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"Cluster.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"Cluster_host.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"Cluster_host.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"DR_task.introduced_SRs","log":"All SRs introduced by this appliance"},{"transition":"published field","name":"DR_task.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"Feature.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"Feature.name_label","log":"a human-readable name"},{"transition":"published field","name":"LVHD.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"Observer.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"Observer.name_label","log":"a human-readable name"},{"transition":"published field","name":"PBD.SR","log":"the storage repository that the pbd realises"},{"transition":"published field","name":"PBD.currently_attached","log":"is the SR currently attached on this host?"},{"transition":"published field","name":"PBD.device_config","log":"a config string to string map that is provided to the host's SR-backend-driver"},{"transition":"published field","name":"PBD.host","log":"physical machine on which the pbd is available"},{"transition":"published field","name":"PBD.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PIF.MAC","log":"ethernet MAC address of physical interface"},{"transition":"published field","name":"PIF.MTU","log":"MTU in octets"},{"transition":"published field","name":"PIF.VLAN","log":"VLAN tag for all traffic passing through this interface"},{"transition":"published field","name":"PIF.device","log":"machine-readable 
name of the interface (e.g. eth0)"},{"transition":"published field","name":"PIF.host","log":"physical machine to which this pif is connected"},{"transition":"published field","name":"PIF.metrics","log":"metrics associated with this PIF"},{"transition":"published field","name":"PIF.network","log":"virtual network to which this pif is connected"},{"transition":"published field","name":"PIF.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PIF_metrics.carrier","log":"Report if the PIF got a carrier or not"},{"transition":"published field","name":"PIF_metrics.device_id","log":"Report device ID"},{"transition":"published field","name":"PIF_metrics.device_name","log":"Report device name"},{"transition":"published field","name":"PIF_metrics.duplex","log":"Full duplex capability of the link (if available)"},{"transition":"published field","name":"PIF_metrics.io_read_kbs","log":"Read bandwidth (KiB/s)"},{"transition":"published field","name":"PIF_metrics.io_write_kbs","log":"Write bandwidth (KiB/s)"},{"transition":"published field","name":"PIF_metrics.last_updated","log":"Time at which this information was last updated"},{"transition":"published field","name":"PIF_metrics.pci_bus_path","log":"PCI bus path of the pif (if available)"},{"transition":"published field","name":"PIF_metrics.speed","log":"Speed of the link in Mbit/s (if available)"},{"transition":"published field","name":"PIF_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PIF_metrics.vendor_id","log":"Report vendor ID"},{"transition":"published field","name":"PIF_metrics.vendor_name","log":"Report vendor name"},{"transition":"published field","name":"Repository.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"Repository.name_label","log":"a human-readable name"},{"transition":"published field","name":"SM.configuration","log":"names and descriptions of 
device config keys"},{"transition":"published field","name":"SM.copyright","log":"Entity which owns the copyright of this plugin"},{"transition":"published field","name":"SM.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"SM.name_label","log":"a human-readable name"},{"transition":"published field","name":"SM.required_api_version","log":"Minimum SM API version required on the server"},{"transition":"published field","name":"SM.type","log":"SR.type"},{"transition":"published field","name":"SM.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"SM.vendor","log":"Vendor who created this plugin"},{"transition":"published field","name":"SM.version","log":"Version of the plugin"},{"transition":"published field","name":"SR.PBDs","log":"describes how particular hosts can see this storage repository"},{"transition":"published field","name":"SR.VDIs","log":"all virtual disks known to this storage repository"},{"transition":"published field","name":"SR.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"SR.content_type","log":"the type of the SR's content, if required (e.g. 
ISOs)"},{"transition":"published field","name":"SR.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"SR.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"SR.name_label","log":"a human-readable name"},{"transition":"published field","name":"SR.other_config","log":"additional configuration"},{"transition":"published field","name":"SR.physical_size","log":"total physical size of the repository (in bytes)"},{"transition":"published field","name":"SR.physical_utilisation","log":"physical space currently utilised on this storage repository (in bytes). Note that for sparse disk formats, physical_utilisation may be less than virtual_allocation"},{"transition":"published field","name":"SR.shared","log":"true if this SR is (capable of being) shared between multiple hosts"},{"transition":"published field","name":"SR.type","log":"type of the storage repository"},{"transition":"published field","name":"SR.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"SR.virtual_allocation","log":"sum of virtual_sizes of all VDIs in this storage repository (in bytes)"},{"transition":"published field","name":"VBD.VDI","log":"the virtual disk"},{"transition":"published field","name":"VBD.VM","log":"the virtual machine"},{"transition":"published field","name":"VBD.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VBD.bootable","log":"true if this VBD is bootable"},{"transition":"published field","name":"VBD.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VBD.currently_attached","log":"is the device currently attached (erased on reboot)"},{"transition":"published field","name":"VBD.device","log":"device seen by the guest e.g. hda1"},{"transition":"published field","name":"VBD.empty","log":"if true this represents an empty drive"},{"transition":"published field","name":"VBD.metrics","log":"metrics associated with this VBD"},{"transition":"published field","name":"VBD.mode","log":"the mode the VBD should be mounted with"},{"transition":"published field","name":"VBD.other_config","log":"additional configuration"},{"transition":"published field","name":"VBD.qos_algorithm_params","log":"parameters for chosen QoS algorithm"},{"transition":"published field","name":"VBD.qos_algorithm_type","log":"QoS algorithm to use"},{"transition":"published field","name":"VBD.qos_supported_algorithms","log":"supported QoS algorithms for this VBD"},{"transition":"published field","name":"VBD.runtime_properties","log":"Device runtime properties"},{"transition":"published field","name":"VBD.status_code","log":"error/success code associated with last attach-operation (erased on reboot)"},{"transition":"published field","name":"VBD.status_detail","log":"error/success information associated with last attach-operation status (erased on reboot)"},{"transition":"published field","name":"VBD.storage_lock","log":"true if a storage level lock was acquired"},{"transition":"published field","name":"VBD.type","log":"how the VBD will appear to the guest (e.g. 
disk or CD)"},{"transition":"published field","name":"VBD.userdevice","log":"user-friendly device name e.g. 0,1,2,etc."},{"transition":"published field","name":"VBD.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VBD_metrics.io_read_kbs","log":"Read bandwidth (KiB/s)"},{"transition":"published field","name":"VBD_metrics.io_write_kbs","log":"Write bandwidth (KiB/s)"},{"transition":"published field","name":"VBD_metrics.last_updated","log":"Time at which this information was last updated"},{"transition":"published field","name":"VBD_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VDI.SR","log":"storage repository in which the VDI resides"},{"transition":"published field","name":"VDI.VBDs","log":"list of vbds that refer to this disk"},{"transition":"published field","name":"VDI.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VDI.crash_dumps","log":"list of crash dumps that refer to this disk"},{"transition":"published field","name":"VDI.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VDI.managed","log":""},{"transition":"published field","name":"VDI.missing","log":"true if SR scan operation reported this VDI as not present on disk"},{"transition":"published field","name":"VDI.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"VDI.name_label","log":"a human-readable name"},{"transition":"published field","name":"VDI.other_config","log":"additional configuration"},{"transition":"published field","name":"VDI.parent","log":"This field is always null. 
Deprecated"},{"transition":"published field","name":"VDI.physical_utilisation","log":"amount of physical space that the disk image is currently taking up on the storage repository (in bytes)"},{"transition":"published field","name":"VDI.read_only","log":"true if this disk may ONLY be mounted read-only"},{"transition":"published field","name":"VDI.sharable","log":"true if this disk may be shared"},{"transition":"published field","name":"VDI.storage_lock","log":"true if this disk is locked at the storage level"},{"transition":"published field","name":"VDI.type","log":"type of the VDI"},{"transition":"published field","name":"VDI.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VDI.virtual_size","log":"size of disk as presented to the guest (in bytes). Note that, depending on storage backend type, requested size may not be respected exactly"},{"transition":"published field","name":"VIF.MAC","log":"ethernet MAC address of virtual interface, as exposed to guest"},{"transition":"published field","name":"VIF.MTU","log":"MTU in octets"},{"transition":"published field","name":"VIF.VM","log":"virtual machine to which this vif is connected"},{"transition":"published field","name":"VIF.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VIF.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VIF.currently_attached","log":"is the device currently attached (erased on reboot)"},{"transition":"published field","name":"VIF.device","log":"order in which VIF backends are created by xapi"},{"transition":"published field","name":"VIF.metrics","log":"metrics associated with this VIF"},{"transition":"published field","name":"VIF.network","log":"virtual network to which this vif is connected"},{"transition":"published field","name":"VIF.other_config","log":"additional configuration"},{"transition":"published field","name":"VIF.qos_algorithm_params","log":"parameters for chosen QoS algorithm"},{"transition":"published field","name":"VIF.qos_algorithm_type","log":"QoS algorithm to use"},{"transition":"published field","name":"VIF.qos_supported_algorithms","log":"supported QoS algorithms for this VIF"},{"transition":"published field","name":"VIF.runtime_properties","log":"Device runtime properties"},{"transition":"published field","name":"VIF.status_code","log":"error/success code associated with last attach-operation (erased on reboot)"},{"transition":"published field","name":"VIF.status_detail","log":"error/success information associated with last attach-operation status (erased on reboot)"},{"transition":"published field","name":"VIF.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VIF_metrics.io_read_kbs","log":"Read bandwidth (KiB/s)"},{"transition":"published field","name":"VIF_metrics.io_write_kbs","log":"Write bandwidth (KiB/s)"},{"transition":"published field","name":"VIF_metrics.last_updated","log":"Time at which this information was last 
updated"},{"transition":"published field","name":"VIF_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VLAN.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM.HVM_boot_params","log":"HVM boot params"},{"transition":"published field","name":"VM.HVM_boot_policy","log":"HVM boot policy"},{"transition":"published field","name":"VM.PCI_bus","log":"PCI bus path for pass-through devices"},{"transition":"published field","name":"VM.PV_args","log":"kernel command-line arguments"},{"transition":"published field","name":"VM.PV_bootloader","log":"name of or path to bootloader"},{"transition":"published field","name":"VM.PV_bootloader_args","log":"miscellaneous arguments for the bootloader"},{"transition":"published field","name":"VM.PV_kernel","log":"path to the kernel"},{"transition":"published field","name":"VM.PV_legacy_args","log":"to make Zurich guests boot"},{"transition":"published field","name":"VM.PV_ramdisk","log":"path to the initrd"},{"transition":"published field","name":"VM.VBDs","log":"virtual block devices"},{"transition":"published field","name":"VM.VCPUs_at_startup","log":"Boot number of VCPUs"},{"transition":"published field","name":"VM.VCPUs_max","log":"Max number of VCPUs"},{"transition":"published field","name":"VM.VCPUs_params","log":"configuration parameters for the selected VCPU policy"},{"transition":"published field","name":"VM.VIFs","log":"virtual network interfaces"},{"transition":"published field","name":"VM.VTPMs","log":"virtual TPMs"},{"transition":"published field","name":"VM.VUSBs","log":"virtual usb devices"},{"transition":"published field","name":"VM.actions_after_crash","log":"action to take if the guest crashes"},{"transition":"published field","name":"VM.actions_after_reboot","log":"action to take after the guest has rebooted itself"},{"transition":"published field","name":"VM.actions_after_shutdown","log":"action to take after the guest has 
shutdown itself"},{"transition":"published field","name":"VM.affinity","log":"A host which the VM has some affinity for (or NULL). This is used as a hint to the start call when it decides where to run the VM. Resource constraints may cause the VM to be started elsewhere."},{"transition":"published field","name":"VM.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VM.appliance","log":"the appliance to which this VM belongs"},{"transition":"published field","name":"VM.consoles","log":"virtual console devices"},{"transition":"published field","name":"VM.crash_dumps","log":"crash dumps associated with this VM"},{"transition":"published field","name":"VM.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VM.domarch","log":"Domain architecture (if available, null string otherwise)"},{"transition":"published field","name":"VM.domid","log":"domain ID (if available, -1 otherwise)"},{"transition":"published field","name":"VM.guest_metrics","log":"metrics associated with the running guest"},{"transition":"published field","name":"VM.is_a_template","log":"true if this is a template. 
Template VMs can never be started, they are used only for cloning other VMs"},{"transition":"published field","name":"VM.is_control_domain","log":"true if this is a control domain (domain 0 or a driver domain)"},{"transition":"published field","name":"VM.last_boot_CPU_flags","log":"describes the CPU flags on which the VM was last booted"},{"transition":"published field","name":"VM.memory_dynamic_max","log":"Dynamic maximum (bytes)"},{"transition":"published field","name":"VM.memory_dynamic_min","log":"Dynamic minimum (bytes)"},{"transition":"published field","name":"VM.memory_overhead","log":"Virtualization memory overhead (bytes)."},{"transition":"published field","name":"VM.memory_static_max","log":"Statically-set (i.e. absolute) maximum (bytes). The value of this field at VM start time acts as a hard limit of the amount of memory a guest can use. New values only take effect on reboot."},{"transition":"published field","name":"VM.memory_static_min","log":"Statically-set (i.e. absolute) mininum (bytes). The value of this field indicates the least amount of memory this VM can boot with without crashing."},{"transition":"published field","name":"VM.memory_target","log":"Dynamically-set memory target (bytes). 
The value of this field indicates the current target for memory available to this VM."},{"transition":"published field","name":"VM.metrics","log":"metrics associated with this VM"},{"transition":"published field","name":"VM.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"VM.name_label","log":"a human-readable name"},{"transition":"published field","name":"VM.other_config","log":"additional configuration"},{"transition":"published field","name":"VM.platform","log":"platform-specific configuration"},{"transition":"published field","name":"VM.power_state","log":"Current power state of the machine"},{"transition":"published field","name":"VM.recommendations","log":"An XML specification of recommended values and ranges for properties of this VM"},{"transition":"published field","name":"VM.resident_on","log":"the host the VM is currently resident on"},{"transition":"published field","name":"VM.scheduled_to_be_resident_on","log":"the host on which the VM is due to be started/resumed/migrated. This acts as a memory reservation indicator"},{"transition":"published field","name":"VM.suspend_VDI","log":"The VDI that a suspend image is stored on. 
(Only has meaning if VM is currently suspended)"},{"transition":"published field","name":"VM.user_version","log":"Creators of VMs and templates may store version information here."},{"transition":"published field","name":"VM.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VMPP.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"VMPP.name_label","log":"a human-readable name"},{"transition":"published field","name":"VMSS.VMs","log":"all VMs attached to this snapshot schedule"},{"transition":"published field","name":"VMSS.enabled","log":"enable or disable this snapshot schedule"},{"transition":"published field","name":"VMSS.frequency","log":"frequency of taking snapshot from snapshot schedule"},{"transition":"published field","name":"VMSS.last_run_time","log":"time of the last snapshot"},{"transition":"published field","name":"VMSS.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"VMSS.name_label","log":"a human-readable name"},{"transition":"published field","name":"VMSS.retained_snapshots","log":"maximum number of snapshots that should be stored at any time"},{"transition":"published field","name":"VMSS.schedule","log":"schedule of the snapshot containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone"},{"transition":"published field","name":"VMSS.type","log":"type of the snapshot schedule"},{"transition":"published field","name":"VMSS.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM_appliance.VMs","log":"all VMs in this appliance"},{"transition":"published field","name":"VM_appliance.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VM_appliance.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VM_appliance.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"VM_appliance.name_label","log":"a human-readable name"},{"transition":"published field","name":"VM_appliance.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM_guest_metrics.PV_drivers_up_to_date","log":"true if the PV drivers appear to be up to date"},{"transition":"published field","name":"VM_guest_metrics.PV_drivers_version","log":"version of the PV drivers"},{"transition":"published field","name":"VM_guest_metrics.disks","log":"Disk configuration/free space"},{"transition":"published field","name":"VM_guest_metrics.last_updated","log":"Time at which this information was last updated"},{"transition":"published field","name":"VM_guest_metrics.memory","log":"free/used/total"},{"transition":"published field","name":"VM_guest_metrics.networks","log":"network configuration"},{"transition":"published field","name":"VM_guest_metrics.os_version","log":"version of the OS"},{"transition":"published field","name":"VM_guest_metrics.other","log":"anything else"},{"transition":"published field","name":"VM_guest_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM_metrics.VCPUs_CPU","log":"VCPU to PCPU map"},{"transition":"published field","name":"VM_metrics.VCPUs_flags","log":"CPU flags (blocked,online,running)"},{"transition":"published field","name":"VM_metrics.VCPUs_number","log":"Current number of VCPUs"},{"transition":"published field","name":"VM_metrics.VCPUs_params","log":"The live equivalent 
to VM.VCPUs_params"},{"transition":"published field","name":"VM_metrics.VCPUs_utilisation","log":"Utilisation for all of guest's current VCPUs"},{"transition":"published field","name":"VM_metrics.install_time","log":"Time at which the VM was installed"},{"transition":"published field","name":"VM_metrics.last_updated","log":"Time at which this information was last updated"},{"transition":"published field","name":"VM_metrics.memory_actual","log":"Guest's actual memory (bytes)"},{"transition":"published field","name":"VM_metrics.start_time","log":"Time at which this VM was last booted"},{"transition":"published field","name":"VM_metrics.state","log":"The state of the guest, eg blocked, dying etc"},{"transition":"published field","name":"VM_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VTPM.VM","log":"The virtual machine the TPM is attached to"},{"transition":"published field","name":"VTPM.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VTPM.backend","log":"The domain where the backend is located (unused)"},{"transition":"published field","name":"VTPM.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VTPM.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VUSB.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"VUSB.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"VUSB.currently_attached","log":"is the device currently attached"},{"transition":"published field","name":"blob.last_updated","log":"Time at which the data in the blob was last updated"},{"transition":"published field","name":"blob.mime_type","log":"The mime type associated with this object. Defaults to 'application/octet-stream' if the empty string is supplied"},{"transition":"published field","name":"blob.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"blob.name_label","log":"a human-readable name"},{"transition":"published field","name":"blob.size","log":"Size of the binary data, in bytes"},{"transition":"published field","name":"blob.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"console.VM","log":"VM to which this console is attached"},{"transition":"published field","name":"console.location","log":"URI for the console service"},{"transition":"published field","name":"console.other_config","log":"additional configuration"},{"transition":"published field","name":"console.protocol","log":"the protocol used by this console"},{"transition":"published field","name":"console.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"crashdump.VDI","log":"the virtual disk"},{"transition":"published field","name":"crashdump.VM","log":"the virtual machine"},{"transition":"published field","name":"crashdump.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"data_source.enabled","log":"true if the data source is being 
logged"},{"transition":"published field","name":"data_source.max","log":"the maximum value of the data source"},{"transition":"published field","name":"data_source.min","log":"the minimum value of the data source"},{"transition":"published field","name":"data_source.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"data_source.name_label","log":"a human-readable name"},{"transition":"published field","name":"data_source.standard","log":"true if the data source is enabled by default. Non-default data sources cannot be disabled"},{"transition":"published field","name":"data_source.units","log":"the units of the value"},{"transition":"published field","name":"data_source.value","log":"current value of the data source"},{"transition":"published field","name":"event.class","log":"The name of the class of the object that changed"},{"transition":"published field","name":"event.id","log":"An ID, monotonically increasing, and local to the current session"},{"transition":"published field","name":"event.obj_uuid","log":"The uuid of the object that changed"},{"transition":"published field","name":"event.operation","log":"The operation that was performed"},{"transition":"published field","name":"event.ref","log":"A reference to the object that changed"},{"transition":"published field","name":"event.timestamp","log":"The time at which the event occurred"},{"transition":"published field","name":"host.API_version_major","log":"major version number"},{"transition":"published field","name":"host.API_version_minor","log":"minor version number"},{"transition":"published field","name":"host.API_version_vendor","log":"identification of vendor"},{"transition":"published field","name":"host.API_version_vendor_implementation","log":"details of vendor implementation"},{"transition":"published field","name":"host.PBDs","log":"physical blockdevices"},{"transition":"published field","name":"host.PIFs","log":"physical network 
interfaces"},{"transition":"published field","name":"host.address","log":"The address by which this host can be contacted from any other host in the pool"},{"transition":"published field","name":"host.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"host.capabilities","log":"Xen capabilities"},{"transition":"published field","name":"host.cpu_configuration","log":"The CPU configuration on this host. May contain keys such as \"nr_nodes\", \"sockets_per_node\", \"cores_per_socket\", or \"threads_per_core\""},{"transition":"published field","name":"host.crash_dump_sr","log":"The SR in which VDIs for crash dumps are created"},{"transition":"published field","name":"host.crashdumps","log":"Set of host crash dumps"},{"transition":"published field","name":"host.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"host.enabled","log":"True if the host is currently enabled"},{"transition":"published field","name":"host.host_CPUs","log":"The physical CPUs on this host"},{"transition":"published field","name":"host.hostname","log":"The hostname of this host"},{"transition":"published field","name":"host.license_params","log":"State of the current license"},{"transition":"published field","name":"host.logging","log":"logging configuration"},{"transition":"published field","name":"host.memory_overhead","log":"Virtualization memory overhead (bytes)."},{"transition":"published field","name":"host.metrics","log":"metrics associated with this host"},{"transition":"published field","name":"host.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"host.name_label","log":"a human-readable 
name"},{"transition":"published field","name":"host.other_config","log":"additional configuration"},{"transition":"published field","name":"host.patches","log":"Set of host patches"},{"transition":"published field","name":"host.resident_VMs","log":"list of VMs currently resident on host"},{"transition":"published field","name":"host.sched_policy","log":"Scheduler policy currently in force on this host"},{"transition":"published field","name":"host.software_version","log":"version strings"},{"transition":"published field","name":"host.supported_bootloaders","log":"a list of the bootloaders installed on the machine"},{"transition":"published field","name":"host.suspend_image_sr","log":"The SR in which VDIs for suspend images are created"},{"transition":"published field","name":"host.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host_cpu.family","log":"the family (number) of the physical CPU"},{"transition":"published field","name":"host_cpu.features","log":"the physical CPU feature bitmap"},{"transition":"published field","name":"host_cpu.flags","log":"the flags of the physical CPU (a decoded version of the features field)"},{"transition":"published field","name":"host_cpu.host","log":"the host the CPU is in"},{"transition":"published field","name":"host_cpu.model","log":"the model number of the physical CPU"},{"transition":"published field","name":"host_cpu.modelname","log":"the model name of the physical CPU"},{"transition":"published field","name":"host_cpu.number","log":"the number of the physical CPU within the host"},{"transition":"published field","name":"host_cpu.speed","log":"the speed of the physical CPU"},{"transition":"published field","name":"host_cpu.stepping","log":"the stepping of the physical CPU"},{"transition":"published field","name":"host_cpu.utilisation","log":"the current CPU utilisation"},{"transition":"published field","name":"host_cpu.uuid","log":"Unique identifier/object 
reference"},{"transition":"published field","name":"host_cpu.vendor","log":"the vendor of the physical CPU"},{"transition":"published field","name":"host_crashdump.host","log":"Host the crashdump relates to"},{"transition":"published field","name":"host_crashdump.size","log":"Size of the crashdump"},{"transition":"published field","name":"host_crashdump.timestamp","log":"Time the crash happened"},{"transition":"published field","name":"host_crashdump.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host_metrics.last_updated","log":"Time at which this information was last updated"},{"transition":"published field","name":"host_metrics.live","log":"Pool master thinks this host is live"},{"transition":"published field","name":"host_metrics.memory_free","log":"Free host memory (bytes)"},{"transition":"published field","name":"host_metrics.memory_total","log":"Total host memory (bytes)"},{"transition":"published field","name":"host_metrics.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host_patch.applied","log":"True if the patch has been applied"},{"transition":"published field","name":"host_patch.host","log":"Host the patch relates to"},{"transition":"published field","name":"host_patch.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"host_patch.name_label","log":"a human-readable name"},{"transition":"published field","name":"host_patch.size","log":"Size of the patch"},{"transition":"published field","name":"host_patch.timestamp_applied","log":"Time the patch was applied"},{"transition":"published field","name":"host_patch.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host_patch.version","log":"Patch version number"},{"transition":"published field","name":"message.body","log":"The body of the message"},{"transition":"published field","name":"message.name","log":"The name of 
the message"},{"transition":"published field","name":"message.obj_uuid","log":"The uuid of the object this message is associated with"},{"transition":"published field","name":"message.priority","log":"The message priority, 0 being low priority"},{"transition":"published field","name":"message.timestamp","log":"The time at which the message was created"},{"transition":"published field","name":"message.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"network.PIFs","log":"list of connected pifs"},{"transition":"published field","name":"network.VIFs","log":"list of connected vifs"},{"transition":"published field","name":"network.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"network.bridge","log":"name of the bridge corresponding to this network on the local host"},{"transition":"published field","name":"network.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"network.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"network.name_label","log":"a human-readable name"},{"transition":"published field","name":"network.other_config","log":"additional configuration"},{"transition":"published field","name":"network.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"network_sriov.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"pool.allowed_operations","log":"list of the operations allowed in this state. 
This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"pool.coordinator_bias","log":"true if bias against pool master when scheduling vms is enabled, false otherwise"},{"transition":"published field","name":"pool.crash_dump_SR","log":"The SR in which VDIs for crash dumps are created"},{"transition":"published field","name":"pool.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"pool.default_SR","log":"Default SR for VDIs"},{"transition":"published field","name":"pool.master","log":"The host that is pool master"},{"transition":"published field","name":"pool.name_description","log":"Description"},{"transition":"published field","name":"pool.name_label","log":"Short name"},{"transition":"published field","name":"pool.other_config","log":"additional configuration"},{"transition":"published field","name":"pool.suspend_image_SR","log":"The SR in which VDIs for suspend images are created"},{"transition":"published field","name":"pool.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"pool_patch.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"pool_patch.name_label","log":"a human-readable name"},{"transition":"published field","name":"pool_patch.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"pool_update.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"pool_update.name_label","log":"a human-readable name"},{"transition":"published field","name":"pool_update.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"pool_update.vdi","log":"VDI the update was uploaded 
to"},{"transition":"published field","name":"role.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"secret.other_config","log":"other_config"},{"transition":"published field","name":"secret.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"secret.value","log":"the secret"},{"transition":"published field","name":"session.last_active","log":"Timestamp for last time session was active"},{"transition":"published field","name":"session.pool","log":"True if this session relates to a intra-pool login, false otherwise"},{"transition":"published field","name":"session.this_host","log":"Currently connected host"},{"transition":"published field","name":"session.this_user","log":"Currently connected user"},{"transition":"published field","name":"session.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"subject.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"task.allowed_operations","log":"list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."},{"transition":"published field","name":"task.created","log":"Time task was created"},{"transition":"published field","name":"task.current_operations","log":"links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."},{"transition":"published field","name":"task.error_info","log":"if the task has failed, this field contains the set of associated error strings. Undefined otherwise."},{"transition":"published field","name":"task.finished","log":"Time task finished (i.e. succeeded or failed). 
If task-status is pending, then the value of this field has no meaning"},{"transition":"published field","name":"task.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"task.name_label","log":"a human-readable name"},{"transition":"published field","name":"task.progress","log":"This field contains the estimated fraction of the task which is complete. This field should not be used to determine whether the task is complete - for this the status field of the task should be used."},{"transition":"published field","name":"task.resident_on","log":"the host on which the task is running"},{"transition":"published field","name":"task.result","log":"if the task has completed successfully, this field contains the result value (either Void or an object reference). Undefined otherwise."},{"transition":"published field","name":"task.status","log":"current status of the task"},{"transition":"published field","name":"task.type","log":"if the task has completed successfully, this field contains the type of the encoded result (i.e. name of the class whose reference is in the result field). Undefined otherwise."},{"transition":"published field","name":"task.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"user.fullname","log":"full name"},{"transition":"published field","name":"user.short_name","log":"short name (e.g. 
userid)"},{"transition":"published field","name":"user.uuid","log":"Unique identifier/object reference"},{"transition":"published message","name":"PBD.plug","log":"Activate the specified PBD, causing the referenced SR to be attached and scanned"},{"transition":"published message","name":"PBD.unplug","log":"Deactivate the specified PBD, causing the referenced SR to be detached and nolonger scanned"},{"transition":"published message","name":"PIF.create_VLAN","log":"Create a VLAN interface from an existing physical interface"},{"transition":"published message","name":"PIF.destroy","log":"Destroy the PIF object (provided it is a VLAN interface)"},{"transition":"published message","name":"SR.create","log":"Create a new Storage Repository and introduce it into the managed system, creating both SR record and PBD record to attach it to current host (with specified device_config parameters)"},{"transition":"published message","name":"SR.destroy","log":"Destroy specified SR, removing SR-record from database and remove SR from disk. 
(In order to affect this operation the appropriate device_config is read from the specified SR's PBD on current host)"},{"transition":"published message","name":"SR.forget","log":"Removing specified SR-record from database, without attempting to remove SR from disk"},{"transition":"published message","name":"SR.get_supported_types","log":"Return a set of all the SR types supported by the system"},{"transition":"published message","name":"SR.introduce","log":"Introduce a new Storage Repository into the managed system"},{"transition":"published message","name":"SR.make","log":"Create a new Storage Repository on disk"},{"transition":"published message","name":"SR.scan","log":"Refreshes the list of VDIs associated with an SR"},{"transition":"published message","name":"SR.set_name_description","log":"Set the name description of the SR"},{"transition":"published message","name":"SR.set_name_label","log":"Set the name label of the SR"},{"transition":"published message","name":"SR.set_shared","log":"Sets the shared flag on the SR"},{"transition":"published message","name":"VBD.assert_attachable","log":"Throws an error if this VBD could not be attached to this VM if the VM were running. Intended for debugging."},{"transition":"published message","name":"VBD.eject","log":"Remove the media from the device and leave it empty"},{"transition":"published message","name":"VBD.insert","log":"Insert new media into the device"},{"transition":"published message","name":"VBD.plug","log":"Hotplug the specified VBD, dynamically attaching it to the running VM"},{"transition":"published message","name":"VBD.set_mode","log":"Sets the mode of the VBD. 
The power_state of the VM must be halted."},{"transition":"published message","name":"VBD.unplug","log":"Hot-unplug the specified VBD, dynamically unattaching it from the running VM"},{"transition":"published message","name":"VBD.unplug_force","log":"Forcibly unplug the specified VBD"},{"transition":"published message","name":"VDI.clone","log":"Take an exact copy of the VDI and return a reference to the new disk. If any driver_params are specified then these are passed through to the storage-specific substrate driver that implements the clone operation. NB the clone lives in the same Storage Repository as its parent."},{"transition":"published message","name":"VDI.copy","log":"Copies a VDI to an SR. There must be a host that can see both the source and destination SRs simultaneously"},{"transition":"published message","name":"VDI.forget","log":"Removes a VDI record from the database"},{"transition":"published message","name":"VDI.resize","log":"Resize the VDI."},{"transition":"published message","name":"VDI.resize_online","log":"Resize the VDI which may or may not be attached to running guests."},{"transition":"published message","name":"VDI.set_name_description","log":"Set the name description of the VDI. This can only happen when its SR is currently attached."},{"transition":"published message","name":"VDI.set_name_label","log":"Set the name label of the VDI. This can only happen when then its SR is currently attached."},{"transition":"published message","name":"VDI.set_read_only","log":"Sets the VDI's read_only field"},{"transition":"published message","name":"VDI.snapshot","log":"Take a read-only snapshot of the VDI, returning a reference to the snapshot. If any driver_params are specified then these are passed through to the storage-specific substrate driver that takes the snapshot. 
NB the snapshot lives in the same Storage Repository as its parent."},{"transition":"published message","name":"VIF.plug","log":"Hotplug the specified VIF, dynamically attaching it to the running VM"},{"transition":"published message","name":"VIF.unplug","log":"Hot-unplug the specified VIF, dynamically unattaching it from the running VM"},{"transition":"published message","name":"VM.add_to_VCPUs_params_live","log":"Add the given key-value pair to VM.VCPUs_params, and apply that value on the running VM"},{"transition":"published message","name":"VM.assert_can_boot_here","log":"Returns an error if the VM could not boot on this host for some reason"},{"transition":"published message","name":"VM.assert_operation_valid","log":"Check to see whether this operation is acceptable in the current state of the system, raising an error if the operation is invalid for some reason"},{"transition":"published message","name":"VM.clean_reboot","log":"Attempt to cleanly shutdown the specified VM (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.clean_shutdown","log":"Attempt to cleanly shutdown the specified VM. (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.clone","log":"Clones the specified VM, making a new VM. Clone automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write). This function can only be called when the VM is in the Halted State."},{"transition":"published message","name":"VM.copy","log":"Copies a VM to an SR. 
There must be a host that can see both the source and destination SRs simultaneously"},{"transition":"published message","name":"VM.get_allowed_VBD_devices","log":"Returns a list of the allowed values that a VBD device field can take"},{"transition":"published message","name":"VM.get_allowed_VIF_devices","log":"Returns a list of the allowed values that a VIF device field can take"},{"transition":"published message","name":"VM.get_boot_record","log":"Returns a record describing the VM's dynamic state, initialised when the VM boots and updated to reflect runtime configuration changes e.g. CPU hotplug"},{"transition":"published message","name":"VM.get_possible_hosts","log":"Return the list of hosts on which this VM may run."},{"transition":"published message","name":"VM.hard_reboot","log":"Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM."},{"transition":"published message","name":"VM.hard_shutdown","log":"Stop executing the specified VM without attempting a clean shutdown."},{"transition":"published message","name":"VM.pause","log":"Pause the specified VM. This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.pool_migrate","log":"Migrate a VM to another Host."},{"transition":"published message","name":"VM.power_state_reset","log":"Reset the power-state of the VM to halted in the database only. (Used to recover from slave failures in pooling scenarios by resetting the power-states of VMs running on dead slaves to halted.) This is a potentially dangerous operation; use with care."},{"transition":"published message","name":"VM.provision","log":"Inspects the disk configuration contained within the VM's other_config, creates VDIs and VBDs and then executes any applicable post-install script."},{"transition":"published message","name":"VM.resume","log":"Awaken the specified VM and resume it. 
This can only be called when the specified VM is in the Suspended state."},{"transition":"published message","name":"VM.resume_on","log":"Awaken the specified VM and resume it on a particular Host. This can only be called when the specified VM is in the Suspended state."},{"transition":"published message","name":"VM.send_sysrq","log":"Send the given key as a sysrq to this VM. The key is specified as a single character (a String of length 1). This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.send_trigger","log":"Send the named trigger to this VM. This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.set_HVM_boot_policy","log":"Set the VM.HVM_boot_policy field of the given VM, which will take effect when it is next started"},{"transition":"published message","name":"VM.set_VCPUs_number_live","log":"Set the number of VCPUs for a running VM"},{"transition":"published message","name":"VM.set_actions_after_crash","log":"Sets the actions_after_crash parameter"},{"transition":"published message","name":"VM.set_memory_target_live","log":"Set the memory target for a running VM"},{"transition":"published message","name":"VM.set_shadow_multiplier_live","log":"Set the shadow memory multiplier on a running VM"},{"transition":"published message","name":"VM.start","log":"Start the specified VM. This function can only be called with the VM is in the Halted State."},{"transition":"published message","name":"VM.start_on","log":"Start the specified VM on a particular host. This function can only be called with the VM is in the Halted State."},{"transition":"published message","name":"VM.suspend","log":"Suspend the specified VM to disk. This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.unpause","log":"Resume the specified VM. 
This can only be called when the specified VM is in the Paused state."},{"transition":"published message","name":"VM.update_allowed_operations","log":"Recomputes the list of acceptable operations"},{"transition":"published message","name":"crashdump.destroy","log":"Destroy the specified crashdump"},{"transition":"published message","name":"event.get_current_id","log":"Return the ID of the next event to be generated by the system"},{"transition":"published message","name":"event.next","log":"Blocking call which returns a (possibly empty) batch of events. This method is only recommended for legacy use. New development should use event.from which supersedes this method."},{"transition":"published message","name":"event.register","log":"Registers this session with the event system for a set of given classes. This method is only recommended for legacy use in conjunction with event.next."},{"transition":"published message","name":"event.unregister","log":"Removes this session's registration with the event system for a set of given classes. This method is only recommended for legacy use in conjunction with event.next."},{"transition":"published message","name":"host.bugreport_upload","log":"Run xen-bugtool --yestoall and upload the output to support"},{"transition":"published message","name":"host.destroy","log":"Destroy specified host record in database"},{"transition":"published message","name":"host.disable","log":"Puts the host into a state in which no new VMs can be started. 
Currently active VMs on the host continue to execute."},{"transition":"published message","name":"host.dmesg","log":"Get the host xen dmesg."},{"transition":"published message","name":"host.dmesg_clear","log":"Get the host xen dmesg, and clear the buffer."},{"transition":"published message","name":"host.enable","log":"Puts the host into a state in which new VMs can be started."},{"transition":"published message","name":"host.get_log","log":"Get the host's log file"},{"transition":"published message","name":"host.license_apply","log":"Apply a new license to a host"},{"transition":"published message","name":"host.list_methods","log":"List all supported methods"},{"transition":"published message","name":"host.reboot","log":"Reboot the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)"},{"transition":"published message","name":"host.restart_agent","log":"Restarts the agent after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called."},{"transition":"published message","name":"host.send_debug_keys","log":"Inject the given string as debugging keys into Xen"},{"transition":"published message","name":"host.shutdown","log":"Shutdown the host. 
(This function can only be called if there are no currently running VMs on the host and it is disabled.)"},{"transition":"published message","name":"host_crashdump.destroy","log":"Destroy specified host crash dump, removing it from the disk."},{"transition":"published message","name":"host_crashdump.upload","log":"Upload the specified host crash dump to a specified URL"},{"transition":"published message","name":"host_patch.apply","log":"Apply the selected patch and return its output"},{"transition":"published message","name":"host_patch.destroy","log":"Destroy the specified host patch, removing it from the disk. This does NOT reverse the patch"},{"transition":"published message","name":"pool.create_VLAN","log":"Create PIFs, mapping a network to the same physical interface/VLAN on each host. This call is deprecated: use Pool.create_VLAN_from_PIF instead."},{"transition":"published message","name":"pool.create_VLAN_from_PIF","log":"Create a pool-wide VLAN by taking the PIF."},{"transition":"published message","name":"pool.eject","log":"Instruct a pool master to eject a host from the pool"},{"transition":"published message","name":"pool.emergency_reset_master","log":"Instruct a slave already in a pool that the master has changed"},{"transition":"published message","name":"pool.emergency_transition_to_master","log":"Instruct host that's currently a slave to transition to being master"},{"transition":"published message","name":"pool.join","log":"Instruct host to join a new pool"},{"transition":"published message","name":"pool.join_force","log":"Instruct host to join a new pool"},{"transition":"published message","name":"pool.recover_slaves","log":"Instruct a pool master, M, to try and contact its slaves and, if slaves are in emergency mode, reset their master address to M."},{"transition":"published message","name":"pool.sync_database","log":"Forcibly synchronise the database now"},{"transition":"published message","name":"session.change_password","log":"Change the 
account password; if your session is authenticated with root privileges then the old_pwd is validated and the new_pwd is set regardless"},{"transition":"published message","name":"session.login_with_password","log":"Attempt to authenticate the user, returning a session reference if successful"},{"transition":"published message","name":"session.logout","log":"Log out of a session"},{"transition":"published message","name":"task.cancel","log":"Request that a task be cancelled. Note that a task may fail to be cancelled and may complete or fail normally and note that, even when a task does cancel, it might take an arbitrary amount of time."},{"transition":"published message","name":"task.create","log":"Create a new task object which must be manually destroyed."},{"transition":"published message","name":"task.destroy","log":"Destroy the task object"}],"miami":[{"transition":"published class","name":"Bond","log":"A Network bond that combines physical network interfaces, also known as link aggregation"},{"transition":"published class","name":"VLAN","log":"A VLAN mux/demux"},{"transition":"published class","name":"pool_patch","log":"Pool-wide patches"},{"transition":"published field","name":"Bond.master","log":"The bonded interface"},{"transition":"published field","name":"Bond.other_config","log":"additional configuration"},{"transition":"published field","name":"Bond.slaves","log":"The interfaces which are part of this bond"},{"transition":"published field","name":"PBD.other_config","log":"additional configuration"},{"transition":"published field","name":"PIF.DNS","log":"Comma separated list of the IP addresses of the DNS servers to use"},{"transition":"published field","name":"PIF.IP","log":"IP address"},{"transition":"published field","name":"PIF.VLAN_master_of","log":"Indicates which VLAN this interface receives untagged traffic from"},{"transition":"published field","name":"PIF.VLAN_slave_of","log":"Indicates which VLANs this interface transmits tagged traffic 
to"},{"transition":"published field","name":"PIF.bond_master_of","log":"Indicates this PIF represents the results of a bond"},{"transition":"published field","name":"PIF.bond_slave_of","log":"Indicates which bond this interface is part of"},{"transition":"published field","name":"PIF.currently_attached","log":"true if this interface is online"},{"transition":"published field","name":"PIF.gateway","log":"IP gateway"},{"transition":"published field","name":"PIF.ip_configuration_mode","log":"Sets if and how this interface gets an IP address"},{"transition":"published field","name":"PIF.management","log":"Indicates whether the control software is listening for connections on this interface"},{"transition":"published field","name":"PIF.netmask","log":"IP netmask"},{"transition":"published field","name":"PIF.other_config","log":"Additional configuration"},{"transition":"published field","name":"PIF.physical","log":"true if this represents a physical network interface"},{"transition":"published field","name":"SM.capabilities","log":"capabilities of the SM plugin"},{"transition":"published field","name":"SM.other_config","log":"additional configuration"},{"transition":"published field","name":"SR.sm_config","log":"SM dependent data"},{"transition":"published field","name":"VBD.unpluggable","log":"true if this VBD will support hot-unplug"},{"transition":"published field","name":"VDI.location","log":"location information"},{"transition":"published field","name":"VDI.sm_config","log":"SM dependent data"},{"transition":"published field","name":"VDI.xenstore_data","log":"data to be inserted into the xenstore tree (/local/domain/0/backend/vbd///sm-data) after the VDI is attached. 
This is generally set by the SM backends on vdi_attach."},{"transition":"published field","name":"VLAN.other_config","log":"additional configuration"},{"transition":"published field","name":"VLAN.tag","log":"VLAN tag in use"},{"transition":"published field","name":"VLAN.tagged_PIF","log":"interface on which traffic is tagged"},{"transition":"published field","name":"VLAN.untagged_PIF","log":"interface on which traffic is untagged"},{"transition":"published field","name":"VM.HVM_shadow_multiplier","log":"multiplier applied to the amount of shadow that will be made available to the guest"},{"transition":"published field","name":"VM.last_booted_record","log":"Marshalled value containing VM record at time of last boot, updated dynamically to reflect the runtime state of the domain"},{"transition":"published field","name":"VM.xenstore_data","log":"data to be inserted into the xenstore tree (/local/domain//vm-data) after the VM is created."},{"transition":"published field","name":"crashdump.other_config","log":"additional configuration"},{"transition":"published field","name":"host_crashdump.other_config","log":"additional configuration"},{"transition":"published field","name":"host_patch.other_config","log":"additional configuration"},{"transition":"published field","name":"host_patch.pool_patch","log":"The patch applied"},{"transition":"published field","name":"pool_patch.after_apply_guidance","log":"What the client should do after this patch has been applied."},{"transition":"published field","name":"pool_patch.host_patches","log":"This hosts this patch is applied to."},{"transition":"published field","name":"pool_patch.other_config","log":"additional configuration"},{"transition":"published field","name":"pool_patch.pool_applied","log":"This patch should be applied across the entire pool"},{"transition":"published field","name":"pool_patch.size","log":"Size of the patch"},{"transition":"published field","name":"pool_patch.version","log":"Patch version 
number"},{"transition":"published field","name":"session.other_config","log":"additional configuration"},{"transition":"published field","name":"task.other_config","log":"additional configuration"},{"transition":"published message","name":"Bond.create","log":"Create an interface bond"},{"transition":"published message","name":"Bond.destroy","log":"Destroy an interface bond"},{"transition":"published message","name":"PBD.set_device_config","log":"Sets the PBD's device_config field"},{"transition":"published message","name":"PIF.forget","log":"Destroy the PIF object matching a particular network interface"},{"transition":"published message","name":"PIF.introduce","log":"Create a PIF object matching a particular network interface"},{"transition":"published message","name":"PIF.plug","log":"Attempt to bring up a physical interface"},{"transition":"published message","name":"PIF.reconfigure_ip","log":"Reconfigure the IP address settings for this interface"},{"transition":"published message","name":"PIF.scan","log":"Scan for physical interfaces on a host and create PIF objects to represent them"},{"transition":"published message","name":"PIF.unplug","log":"Attempt to bring down a physical interface"},{"transition":"published message","name":"SR.probe","log":"Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. 
If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config."},{"transition":"published message","name":"SR.set_physical_size","log":"Sets the SR's physical_size field"},{"transition":"published message","name":"VDI.introduce","log":"Create a new VDI record in the database only"},{"transition":"published message","name":"VLAN.create","log":"Create a VLAN mux/demuxer"},{"transition":"published message","name":"VLAN.destroy","log":"Destroy a VLAN mux/demuxer"},{"transition":"published message","name":"VM.maximise_memory","log":"Returns the maximum amount of guest memory which will fit, together with overheads, in the supplied amount of physical memory. If 'exact' is true then an exact calculation is performed using the VM's current settings. If 'exact' is false then a more conservative approximation is used"},{"transition":"published message","name":"host.assert_can_evacuate","log":"Check this host can be evacuated."},{"transition":"published message","name":"host.evacuate","log":"Migrate all VMs off of this host, where possible."},{"transition":"published message","name":"host.get_system_status_capabilities","log":""},{"transition":"published message","name":"host.local_management_reconfigure","log":"Reconfigure the management network interface. Should only be used if Host.management_reconfigure is impossible because the network configuration is broken."},{"transition":"published message","name":"host.management_disable","log":"Disable the management network interface"},{"transition":"published message","name":"host.management_reconfigure","log":"Reconfigure the management network interface"},{"transition":"published message","name":"host.set_hostname_live","log":"Sets the host name to the specified string. 
Both the API and lower-level system hostname are changed immediately."},{"transition":"published message","name":"host.syslog_reconfigure","log":"Re-configure syslog logging"},{"transition":"published message","name":"pool.designate_new_master","log":"Perform an orderly handover of the role of master to the referenced host."},{"transition":"published message","name":"pool.disable_ha","log":"Turn off High Availability mode"},{"transition":"published message","name":"pool.enable_ha","log":"Turn on High Availability mode"},{"transition":"published message","name":"pool_patch.apply","log":"Apply the selected patch to a host and return its output"},{"transition":"published message","name":"pool_patch.clean","log":"Removes the patch's files from the server"},{"transition":"published message","name":"pool_patch.destroy","log":"Removes the patch's files from all hosts in the pool, and removes the database entries. Only works on unapplied patches."},{"transition":"published message","name":"pool_patch.pool_apply","log":"Apply the selected patch to all hosts in the pool and return a map of host_ref -> patch output"},{"transition":"published message","name":"pool_patch.precheck","log":"Execute the precheck stage of the selected patch on a host and return its output"},{"transition":"published message","name":"session.local_logout","log":"Log out of local session."},{"transition":"published message","name":"session.slave_local_login_with_password","log":"Authenticate locally against a slave in emergency mode. 
Note the resulting sessions are only good for use on this host."},{"transition":"deprecated message","name":"PIF.create_VLAN","log":"Replaced by VLAN.create"},{"transition":"deprecated message","name":"PIF.destroy","log":"Replaced by VLAN.destroy and Bond.destroy"},{"transition":"deprecated message","name":"SR.make","log":"Use SR.create instead"},{"transition":"deprecated message","name":"host_patch.apply","log":""},{"transition":"deprecated message","name":"host_patch.destroy","log":""}],"symc":[{"transition":"published message","name":"SR.update","log":"Refresh the fields on the SR object"},{"transition":"published message","name":"VDI.update","log":"Ask the storage backend to refresh the fields in the VDI object"}],"orlando":[{"transition":"published class","name":"blob","log":"A placeholder for a binary blob"},{"transition":"published class","name":"data_source","log":"Data sources for logging in RRDs"},{"transition":"published class","name":"message","log":"An message for the attention of the administrator"},{"transition":"published field","name":"PIF.disallow_unplug","log":"Prevent this PIF from being unplugged; set this to notify the management tool-stack that the PIF has a special use and should not be unplugged under any circumstances (e.g. 
because you're running storage traffic over it)"},{"transition":"published field","name":"PIF_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"SM.driver_filename","log":"filename of the storage driver"},{"transition":"published field","name":"SR.blobs","log":"Binary blobs associated with this SR"},{"transition":"published field","name":"SR.tags","log":"user-specified tags for categorization purposes"},{"transition":"published field","name":"VBD_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"VDI.is_a_snapshot","log":"true if this is a snapshot."},{"transition":"published field","name":"VDI.snapshot_of","log":"Ref pointing to the VDI this snapshot is of."},{"transition":"published field","name":"VDI.snapshot_time","log":"Date/time when this snapshot was created."},{"transition":"published field","name":"VDI.snapshots","log":"List pointing to all the VDIs snapshots."},{"transition":"published field","name":"VDI.tags","log":"user-specified tags for categorization purposes"},{"transition":"published field","name":"VIF_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"VM.blobs","log":"Binary blobs associated with this VM"},{"transition":"published field","name":"VM.blocked_operations","log":"List of operations which have been explicitly blocked and an error code"},{"transition":"published field","name":"VM.ha_always_run","log":"if true then the system will attempt to keep the VM running as much as possible."},{"transition":"published field","name":"VM.ha_restart_priority","log":"has possible values: \"best-effort\" meaning \"try to restart this VM if possible but don't consider the Pool to be overcommitted if this is not possible\"; \"restart\" meaning \"this VM should be restarted\"; \"\" meaning \"do not try to restart this VM\""},{"transition":"published field","name":"VM.is_a_snapshot","log":"true if this is a snapshot. 
Snapshotted VMs can never be started, they are used only for cloning other VMs"},{"transition":"published field","name":"VM.snapshot_of","log":"Ref pointing to the VM this snapshot is of."},{"transition":"published field","name":"VM.snapshot_time","log":"Date/time when this snapshot was created."},{"transition":"published field","name":"VM.snapshots","log":"List pointing to all the VM snapshots."},{"transition":"published field","name":"VM.tags","log":"user-specified tags for categorization purposes"},{"transition":"published field","name":"VM.transportable_snapshot_id","log":"Transportable ID of the snapshot VM"},{"transition":"published field","name":"VM_guest_metrics.live","log":"True if the guest is sending heartbeat messages via the guest agent"},{"transition":"published field","name":"VM_guest_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"VM_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"host.blobs","log":"Binary blobs associated with this host"},{"transition":"published field","name":"host.ha_network_peers","log":"The set of hosts visible via the network from this host"},{"transition":"published field","name":"host.ha_statefiles","log":"The set of statefiles accessible from this host"},{"transition":"published field","name":"host.tags","log":"user-specified tags for categorization purposes"},{"transition":"published field","name":"host_cpu.other_config","log":"additional configuration"},{"transition":"published field","name":"host_metrics.other_config","log":"additional configuration"},{"transition":"published field","name":"message.cls","log":"The class of the object this message is associated with"},{"transition":"published field","name":"network.blobs","log":"Binary blobs associated with this network"},{"transition":"published field","name":"network.tags","log":"user-specified tags for categorization purposes"},{"transition":"published 
field","name":"pool.blobs","log":"Binary blobs associated with this pool"},{"transition":"published field","name":"pool.gui_config","log":"gui-specific configuration for pool"},{"transition":"published field","name":"pool.ha_allow_overcommit","log":"If set to false then operations which would cause the Pool to become overcommitted will be blocked."},{"transition":"published field","name":"pool.ha_configuration","log":"The current HA configuration"},{"transition":"published field","name":"pool.ha_enabled","log":"true if HA is enabled on the pool, false otherwise"},{"transition":"published field","name":"pool.ha_host_failures_to_tolerate","log":"Number of host failures to tolerate before the Pool is declared to be overcommitted"},{"transition":"published field","name":"pool.ha_overcommitted","log":"True if the Pool is considered to be overcommitted i.e. if there exist insufficient physical resources to tolerate the configured number of host failures"},{"transition":"published field","name":"pool.ha_plan_exists_for","log":"Number of future host failures we have managed to find a plan for. 
Once this reaches zero any future host failures will cause the failure of protected VMs."},{"transition":"published field","name":"pool.ha_statefiles","log":"HA statefile VDIs in use"},{"transition":"published field","name":"pool.tags","log":"user-specified tags for categorization purposes"},{"transition":"published field","name":"task.subtask_of","log":"Ref pointing to the task this is a substask of."},{"transition":"published field","name":"task.subtasks","log":"List pointing to all the substasks."},{"transition":"published field","name":"user.other_config","log":"additional configuration"},{"transition":"published message","name":"PIF.db_forget","log":"Destroy a PIF database record."},{"transition":"published message","name":"PIF.db_introduce","log":"Create a new PIF record in the database only"},{"transition":"published message","name":"PIF.set_disallow_unplug","log":"Set whether unplugging the PIF is allowed"},{"transition":"published message","name":"SR.assert_can_host_ha_statefile","log":"Returns successfully if the given SR can host an HA statefile. Otherwise returns an error to explain why not"},{"transition":"published message","name":"SR.create_new_blob","log":"Create a placeholder for a named binary blob of data that is associated with this SR"},{"transition":"published message","name":"VM.assert_agile","log":"Returns an error if the VM is not considered agile e.g. 
because it is tied to a resource local to a host"},{"transition":"published message","name":"VM.create_new_blob","log":"Create a placeholder for a named binary blob of data that is associated with this VM"},{"transition":"published message","name":"VM.forget_data_source_archives","log":"Forget the recorded statistics related to the specified data source"},{"transition":"published message","name":"VM.get_data_sources","log":""},{"transition":"published message","name":"VM.query_data_source","log":"Query the latest value of the specified data source"},{"transition":"published message","name":"VM.record_data_source","log":"Start recording the specified data source"},{"transition":"published message","name":"VM.set_ha_always_run","log":"Set the value of the ha_always_run"},{"transition":"published message","name":"VM.set_ha_restart_priority","log":"Set the value of the ha_restart_priority field"},{"transition":"published message","name":"VM.set_memory_static_max","log":"Set the value of the memory_static_max field"},{"transition":"published message","name":"VM.snapshot","log":"Snapshots the specified VM, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write)."},{"transition":"published message","name":"VM.snapshot_with_quiesce","log":"Snapshots the specified VM with quiesce, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. 
Copy on Write)."},{"transition":"published message","name":"VM.wait_memory_target_live","log":"Wait for a running VM to reach its current memory target"},{"transition":"published message","name":"blob.create","log":"Create a placeholder for a binary blob"},{"transition":"published message","name":"blob.destroy","log":""},{"transition":"published message","name":"host.backup_rrds","log":"This causes the RRDs to be backed up to the master"},{"transition":"published message","name":"host.call_plugin","log":"Call an API plugin on this host"},{"transition":"published message","name":"host.compute_free_memory","log":"Computes the amount of free memory on the host."},{"transition":"published message","name":"host.create_new_blob","log":"Create a placeholder for a named binary blob of data that is associated with this host"},{"transition":"published message","name":"host.emergency_ha_disable","log":"This call disables HA on the local host. This should only be used with extreme care."},{"transition":"published message","name":"host.forget_data_source_archives","log":"Forget the recorded statistics related to the specified data source"},{"transition":"published message","name":"host.get_data_sources","log":""},{"transition":"published message","name":"host.get_servertime","log":"This call queries the host's clock for the current time"},{"transition":"published message","name":"host.get_vms_which_prevent_evacuation","log":"Return a set of VMs which prevent the host being evacuated, with per-VM error codes"},{"transition":"published message","name":"host.power_on","log":"Attempt to power-on the host (if the capability exists)."},{"transition":"published message","name":"host.query_data_source","log":"Query the latest value of the specified data source"},{"transition":"published message","name":"host.record_data_source","log":"Start recording the specified data source"},{"transition":"published message","name":"host.shutdown_agent","log":"Shuts the agent down after a 10 second 
pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called."},{"transition":"published message","name":"host.sync_data","log":"This causes the synchronisation of the non-database data (messages, RRDs and so on) stored on the master to be synchronised with the host"},{"transition":"published message","name":"message.create","log":""},{"transition":"published message","name":"message.destroy","log":""},{"transition":"published message","name":"message.get","log":""},{"transition":"published message","name":"message.get_all","log":""},{"transition":"published message","name":"message.get_all_records","log":""},{"transition":"published message","name":"message.get_all_records_where","log":""},{"transition":"published message","name":"message.get_by_uuid","log":""},{"transition":"published message","name":"message.get_record","log":""},{"transition":"published message","name":"message.get_since","log":""},{"transition":"published message","name":"network.create_new_blob","log":"Create a placeholder for a named binary blob of data that is associated with this pool"},{"transition":"published message","name":"pool.create_new_blob","log":"Create a placeholder for a named binary blob of data that is associated with this pool"},{"transition":"published message","name":"pool.ha_compute_hypothetical_max_host_failures_to_tolerate","log":"Returns the maximum number of host failures we could tolerate before we would be unable to restart the provided VMs"},{"transition":"published message","name":"pool.ha_compute_max_host_failures_to_tolerate","log":"Returns the maximum number of host failures we could tolerate before we would be unable to restart configured VMs"},{"transition":"published message","name":"pool.ha_compute_vm_failover_plan","log":"Return a VM failover plan assuming a given subset of hosts 
fail"},{"transition":"published message","name":"pool.ha_failover_plan_exists","log":"Returns true if a VM failover plan exists for up to 'n' host failures"},{"transition":"published message","name":"pool.set_ha_host_failures_to_tolerate","log":"Set the maximum number of host failures to consider in the HA VM restart planner"},{"transition":"removed field","name":"VM_guest_metrics.disks","log":"No data"}],"orlando-update-1":[{"transition":"published message","name":"pool.ha_prevent_restarts_for","log":"When this call returns the VM restart logic will not run for the requested number of seconds. If the argument is zero then the restart thread is immediately unblocked"}],"george":[{"transition":"published class","name":"auth","log":"Management of remote authentication services"},{"transition":"published class","name":"subject","log":"A user or group that can log in xapi"},{"transition":"published field","name":"VIF.MAC_autogenerated","log":"true if the MAC was autogenerated; false indicates it was set manually"},{"transition":"published field","name":"host.external_auth_configuration","log":"configuration specific to external authentication service"},{"transition":"published field","name":"host.external_auth_service_name","log":"name of external authentication service configured; empty if none configured."},{"transition":"published field","name":"host.external_auth_type","log":"type of external authentication service configured; empty if none configured."},{"transition":"published field","name":"pool.wlb_enabled","log":"true if workload balancing is enabled on the pool, false otherwise"},{"transition":"published field","name":"pool.wlb_url","log":"Url for the configured workload balancing host"},{"transition":"published field","name":"pool.wlb_username","log":"Username for accessing the workload balancing host"},{"transition":"published field","name":"pool.wlb_verify_cert","log":"true if communication with the WLB server should enforce TLS certificate 
verification."},{"transition":"published field","name":"session.auth_user_sid","log":"the subject identifier of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined."},{"transition":"published field","name":"session.is_local_superuser","log":"true iff this session was created using local superuser credentials"},{"transition":"published field","name":"session.subject","log":"references the subject instance that created the session. If a session instance has is_local_superuser set, then the value of this field is undefined."},{"transition":"published field","name":"session.validation_time","log":"time when session was last validated"},{"transition":"published field","name":"subject.other_config","log":"additional configuration"},{"transition":"published field","name":"subject.subject_identifier","log":"the subject identifier, unique in the external directory service"},{"transition":"published message","name":"VDI.set_sharable","log":"Sets the VDI's sharable field"},{"transition":"published message","name":"VM.retrieve_wlb_recommendations","log":"Returns mapping of hosts to ratings, indicating the suitability of starting the VM at that location according to wlb. Rating is replaced with an error if the VM cannot boot there."},{"transition":"published message","name":"auth.get_group_membership","log":"This calls queries the external directory service to obtain the transitively-closed set of groups that the the subject_identifier is member of."},{"transition":"published message","name":"auth.get_subject_identifier","log":"This call queries the external directory service to obtain the subject_identifier as a string from the human-readable subject_name"},{"transition":"published message","name":"auth.get_subject_information_from_identifier","log":"This call queries the external directory service to obtain the user information (e.g. 
username, organization etc) from the specified subject_identifier"},{"transition":"published message","name":"host.disable_external_auth","log":"This call disables external authentication on the local host"},{"transition":"published message","name":"host.enable_external_auth","log":"This call enables external authentication on a host"},{"transition":"published message","name":"host.get_server_certificate","log":"Get the installed server public TLS certificate."},{"transition":"published message","name":"host.retrieve_wlb_evacuate_recommendations","log":"Retrieves recommended host migrations to perform when evacuating the host from the wlb server. If a VM cannot be migrated from the host the reason is listed instead of a recommendation."},{"transition":"published message","name":"pool.certificate_install","log":"Install TLS CA certificate"},{"transition":"published message","name":"pool.certificate_list","log":"List installed TLS CA certificate"},{"transition":"published message","name":"pool.certificate_sync","log":"Copy the TLS CA certificates and CRLs of the master to all slaves."},{"transition":"published message","name":"pool.certificate_uninstall","log":"Install TLS CA certificate"},{"transition":"published message","name":"pool.crl_install","log":"Install a TLS CA-issued Certificate Revocation List, pool-wide."},{"transition":"published message","name":"pool.crl_list","log":"List the names of all installed TLS CA-issued Certificate Revocation Lists."},{"transition":"published message","name":"pool.crl_uninstall","log":"Remove a pool-wide TLS CA-issued Certificate Revocation List."},{"transition":"published message","name":"pool.deconfigure_wlb","log":"Permanently deconfigures workload balancing monitoring on this pool"},{"transition":"published message","name":"pool.detect_nonhomogeneous_external_auth","log":"This call asynchronously detects if the external authentication configuration in any slave is different from that in the master and raises appropriate 
alerts"},{"transition":"published message","name":"pool.disable_external_auth","log":"This call disables external authentication on all the hosts of the pool"},{"transition":"published message","name":"pool.enable_external_auth","log":"This call enables external authentication on all the hosts of the pool"},{"transition":"published message","name":"pool.initialize_wlb","log":"Initializes workload balancing monitoring on this pool with the specified wlb server"},{"transition":"published message","name":"pool.retrieve_wlb_configuration","log":"Retrieves the pool optimization criteria from the workload balancing server"},{"transition":"published message","name":"pool.retrieve_wlb_recommendations","log":"Retrieves vm migrate recommendations for the pool from the workload balancing server"},{"transition":"published message","name":"pool.send_test_post","log":"Send the given body to the given host and port, using HTTPS, and print the response. This is used for debugging the SSL layer."},{"transition":"published message","name":"pool.send_wlb_configuration","log":"Sets the pool optimization criteria for the workload balancing server"},{"transition":"published message","name":"session.get_all_subject_identifiers","log":"Return a list of all the user subject-identifiers of all existing sessions"},{"transition":"published message","name":"session.logout_subject_identifier","log":"Log out all sessions associated to a user subject-identifier, except the session associated with the context calling this function"},{"transition":"deprecated class","name":"user","log":"Deprecated in favor of subject"},{"transition":"removed field","name":"VM_guest_metrics.memory","log":"Disabled in favour of the RRDs, to improve scalability"}],"midnight-ride":[{"transition":"published class","name":"role","log":"A set of permissions associated with a subject"},{"transition":"published class","name":"secret","log":"A secret"},{"transition":"published field","name":"VM.bios_strings","log":"BIOS 
strings"},{"transition":"published field","name":"VM.children","log":"List pointing to all the children of this VM"},{"transition":"published field","name":"VM.parent","log":"Ref pointing to the parent of this VM"},{"transition":"published field","name":"VM.snapshot_info","log":"Human-readable information concerning this snapshot"},{"transition":"published field","name":"VM.snapshot_metadata","log":"Encoded information about the VM's metadata this is a snapshot of"},{"transition":"published field","name":"host.bios_strings","log":"BIOS strings"},{"transition":"published field","name":"host.cpu_info","log":"Details about the physical CPUs on this host"},{"transition":"published field","name":"host.edition","log":"Product edition"},{"transition":"published field","name":"host.license_server","log":"Contact information of the license server"},{"transition":"published field","name":"host.power_on_config","log":"The power on config"},{"transition":"published field","name":"host.power_on_mode","log":"The power on mode"},{"transition":"published field","name":"network.MTU","log":"MTU in octets"},{"transition":"published field","name":"pool.redo_log_enabled","log":"true a redo-log is to be used other than when HA is enabled, false otherwise"},{"transition":"published field","name":"pool.redo_log_vdi","log":"indicates the VDI to use for the redo-log other than when HA is enabled"},{"transition":"published field","name":"pool.restrictions","log":"Pool-wide restrictions currently in effect"},{"transition":"published field","name":"pool.vswitch_controller","log":"the IP address of the vswitch controller."},{"transition":"published field","name":"role.name_description","log":"what this role is for"},{"transition":"published field","name":"role.name_label","log":"a short user-friendly name for the role"},{"transition":"published field","name":"role.subroles","log":"a list of pointers to other roles or permissions"},{"transition":"published 
field","name":"session.auth_user_name","log":"the subject name of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined."},{"transition":"published field","name":"session.parent","log":"references the parent session that created this session"},{"transition":"published field","name":"session.rbac_permissions","log":"list with all RBAC permissions for this session"},{"transition":"published field","name":"session.tasks","log":"list of tasks created using the current session"},{"transition":"published field","name":"subject.roles","log":"the roles associated with this subject"},{"transition":"published message","name":"VM.checkpoint","log":"Checkpoints the specified VM, making a new VM. Checkpoint automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write) and saves the memory image as well."},{"transition":"published message","name":"VM.compute_memory_overhead","log":"Computes the virtualization memory overhead of a VM."},{"transition":"published message","name":"VM.copy_bios_strings","log":"Copy the BIOS strings from the given host to this VM"},{"transition":"published message","name":"VM.get_cooperative","log":"Return true if the VM is currently 'co-operative' i.e. 
is expected to reach a balloon target and actually has done"},{"transition":"published message","name":"VM.revert","log":"Reverts the specified VM to a previous state."},{"transition":"published message","name":"VM.set_HVM_shadow_multiplier","log":"Set the shadow memory multiplier on a halted VM"},{"transition":"published message","name":"VM.set_VCPUs_at_startup","log":"Set the number of startup VCPUs for a halted VM"},{"transition":"published message","name":"VM.set_VCPUs_max","log":"Set the maximum number of VCPUs for a halted VM"},{"transition":"published message","name":"VM.set_memory_dynamic_max","log":"Set the value of the memory_dynamic_max field"},{"transition":"published message","name":"VM.set_memory_dynamic_min","log":"Set the value of the memory_dynamic_min field"},{"transition":"published message","name":"VM.set_memory_dynamic_range","log":"Set the minimum and maximum amounts of physical memory the VM is allowed to use."},{"transition":"published message","name":"VM.set_memory_limits","log":"Set the memory limits of this VM."},{"transition":"published message","name":"VM.set_memory_static_min","log":"Set the value of the memory_static_min field"},{"transition":"published message","name":"VM.set_memory_static_range","log":"Set the static (ie boot-time) range of virtual memory that the VM is allowed to use."},{"transition":"published message","name":"host.apply_edition","log":"Change to another edition, or reactivate the current edition after a license has expired. 
This may be subject to the successful checkout of an appropriate license."},{"transition":"published message","name":"host.compute_memory_overhead","log":"Computes the virtualization memory overhead of a host."},{"transition":"published message","name":"host.get_uncooperative_resident_VMs","log":"Return a set of VMs which are not co-operating with the host's memory control system"},{"transition":"published message","name":"host.refresh_pack_info","log":"Refresh the list of installed Supplemental Packs."},{"transition":"published message","name":"host.reset_cpu_features","log":"Remove the feature mask, such that after a reboot all features of the CPU are enabled."},{"transition":"published message","name":"host.set_cpu_features","log":"Set the CPU features to be used after a reboot, if the given features string is valid."},{"transition":"published message","name":"pool.disable_redo_log","log":"Disable the redo log if in use, unless HA is enabled."},{"transition":"published message","name":"pool.enable_redo_log","log":"Enable the redo log on the given SR and start using it, unless HA is enabled."},{"transition":"published message","name":"pool.set_vswitch_controller","log":"Set the IP address of the vswitch controller."},{"transition":"published message","name":"role.get_by_permission","log":"This call returns a list of roles given a permission"},{"transition":"published message","name":"role.get_by_permission_name_label","log":"This call returns a list of roles given a permission name"},{"transition":"published message","name":"role.get_permissions","log":"This call returns a list of permissions given a role"},{"transition":"published message","name":"role.get_permissions_name_label","log":"This call returns a list of permission names given a role"},{"transition":"published message","name":"subject.add_to_roles","log":"This call adds a new role to a subject"},{"transition":"published message","name":"subject.get_permissions_name_label","log":"This call returns a 
list of permission names given a subject"},{"transition":"published message","name":"subject.remove_from_roles","log":"This call removes a role from a subject"},{"transition":"deprecated class","name":"host_cpu","log":"Deprecated in favour of the Host.cpu_info field"},{"transition":"deprecated field","name":"VM.memory_target","log":""},{"transition":"deprecated field","name":"host_metrics.memory_free","log":"Will be disabled in favour of RRD"},{"transition":"deprecated message","name":"VM.set_memory_target_live","log":""},{"transition":"deprecated message","name":"VM.wait_memory_target_live","log":""}],"cowley":[{"transition":"published class","name":"VMPP","log":"VM Protection Policy"},{"transition":"published class","name":"tunnel","log":"A tunnel for network traffic"},{"transition":"published field","name":"PIF.tunnel_access_PIF_of","log":"Indicates to which tunnel this PIF gives access"},{"transition":"published field","name":"PIF.tunnel_transport_PIF_of","log":"Indicates to which tunnel this PIF provides transport"},{"transition":"published field","name":"SR.local_cache_enabled","log":"True if this SR is assigned to be the local cache for its host"},{"transition":"published field","name":"VDI.allow_caching","log":"true if this VDI is to be cached in the local cache SR"},{"transition":"published field","name":"VDI.on_boot","log":"The behaviour of this VDI on a VM boot"},{"transition":"published field","name":"VM.is_snapshot_from_vmpp","log":"true if this snapshot was created by the protection policy"},{"transition":"published field","name":"VM.protection_policy","log":"Ref pointing to a protection policy for this VM"},{"transition":"published field","name":"VMPP.VMs","log":"all VMs attached to this protection policy"},{"transition":"published field","name":"VMPP.alarm_config","log":"configuration for the alarm"},{"transition":"published field","name":"VMPP.archive_frequency","log":"frequency of the archive schedule"},{"transition":"published 
field","name":"VMPP.archive_last_run_time","log":"time of the last archive"},{"transition":"published field","name":"VMPP.archive_schedule","log":"schedule of the archive containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone"},{"transition":"published field","name":"VMPP.archive_target_config","log":"configuration for the archive, including its 'location', 'username', 'password'"},{"transition":"published field","name":"VMPP.archive_target_type","log":"type of the archive target config"},{"transition":"published field","name":"VMPP.backup_frequency","log":"frequency of the backup schedule"},{"transition":"published field","name":"VMPP.backup_last_run_time","log":"time of the last backup"},{"transition":"published field","name":"VMPP.backup_retention_value","log":"maximum number of backups that should be stored at any time"},{"transition":"published field","name":"VMPP.backup_schedule","log":"schedule of the backup containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone"},{"transition":"published field","name":"VMPP.backup_type","log":"type of the backup sub-policy"},{"transition":"published field","name":"VMPP.is_alarm_enabled","log":"true if alarm is enabled for this policy"},{"transition":"published field","name":"VMPP.is_archive_running","log":"true if this protection policy's archive is running"},{"transition":"published field","name":"VMPP.is_backup_running","log":"true if this protection policy's backup is running"},{"transition":"published field","name":"VMPP.is_policy_enabled","log":"enable or disable this policy"},{"transition":"published field","name":"VMPP.recent_alerts","log":"recent alerts"},{"transition":"published field","name":"VMPP.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host.local_cache_sr","log":"The SR that is used as a local cache"},{"transition":"published field","name":"tunnel.access_PIF","log":"The interface through which the tunnel 
is accessed"},{"transition":"published field","name":"tunnel.other_config","log":"Additional configuration"},{"transition":"published field","name":"tunnel.status","log":"Status information about the tunnel"},{"transition":"published field","name":"tunnel.transport_PIF","log":"The interface used by the tunnel"},{"transition":"published field","name":"tunnel.uuid","log":"Unique identifier/object reference"},{"transition":"published message","name":"VDI.set_allow_caching","log":"Set the value of the allow_caching parameter. This value can only be changed when the VDI is not attached to a running VM. The caching behaviour is only affected by this flag for VHD-based VDIs that have one parent and no child VHDs. Moreover, caching only takes place when the host running the VM containing this VDI has a nominated SR for local caching."},{"transition":"published message","name":"VDI.set_on_boot","log":"Set the value of the on_boot parameter. This value can only be changed when the VDI is not attached to a running VM."},{"transition":"published message","name":"VM.set_protection_policy","log":"Set the value of the protection_policy field"},{"transition":"published message","name":"VMPP.add_to_alarm_config","log":""},{"transition":"published message","name":"VMPP.add_to_archive_schedule","log":""},{"transition":"published message","name":"VMPP.add_to_archive_target_config","log":""},{"transition":"published message","name":"VMPP.add_to_backup_schedule","log":""},{"transition":"published message","name":"VMPP.archive_now","log":"This call archives the snapshot provided as a parameter"},{"transition":"published message","name":"VMPP.get_alerts","log":"This call fetches a history of alerts for a given protection policy"},{"transition":"published message","name":"VMPP.protect_now","log":"This call executes the protection policy immediately"},{"transition":"published message","name":"VMPP.remove_from_alarm_config","log":""},{"transition":"published 
message","name":"VMPP.remove_from_archive_schedule","log":""},{"transition":"published message","name":"VMPP.remove_from_archive_target_config","log":""},{"transition":"published message","name":"VMPP.remove_from_backup_schedule","log":""},{"transition":"published message","name":"VMPP.set_alarm_config","log":""},{"transition":"published message","name":"VMPP.set_archive_frequency","log":"Set the value of the archive_frequency field"},{"transition":"published message","name":"VMPP.set_archive_last_run_time","log":""},{"transition":"published message","name":"VMPP.set_archive_schedule","log":""},{"transition":"published message","name":"VMPP.set_archive_target_config","log":""},{"transition":"published message","name":"VMPP.set_archive_target_type","log":"Set the value of the archive_target_config_type field"},{"transition":"published message","name":"VMPP.set_backup_frequency","log":"Set the value of the backup_frequency field"},{"transition":"published message","name":"VMPP.set_backup_last_run_time","log":""},{"transition":"published message","name":"VMPP.set_backup_retention_value","log":""},{"transition":"published message","name":"VMPP.set_backup_schedule","log":""},{"transition":"published message","name":"VMPP.set_is_alarm_enabled","log":"Set the value of the is_alarm_enabled field"},{"transition":"published message","name":"host.disable_local_storage_caching","log":"Disable the use of a local SR for caching purposes"},{"transition":"published message","name":"host.enable_local_storage_caching","log":"Enable the use of a local SR for caching purposes"},{"transition":"published message","name":"host.get_server_localtime","log":"This call queries the host's clock for the current time in the host's local timezone"},{"transition":"published message","name":"host.set_power_on_mode","log":"Set the power-on-mode, host, user and password"},{"transition":"published message","name":"pool.disable_local_storage_caching","log":"This call disables pool-wide local storage 
caching"},{"transition":"published message","name":"pool.enable_local_storage_caching","log":"This call attempts to enable pool-wide local storage caching"},{"transition":"published message","name":"pool.test_archive_target","log":"This call tests if a location is valid"},{"transition":"published message","name":"tunnel.create","log":"Create a tunnel"},{"transition":"published message","name":"tunnel.destroy","log":"Destroy a tunnel"},{"transition":"extended message","name":"VDI.copy","log":"The copy can now be performed between any two SRs."},{"transition":"extended message","name":"VM.copy","log":"The copy can now be performed between any two SRs."},{"transition":"extended message","name":"pool.set_vswitch_controller","log":"Allow to be set to the empty string (no controller is used)."}],"boston":[{"transition":"published class","name":"DR_task","log":"DR task"},{"transition":"published class","name":"GPU_group","log":"A group of compatible GPUs across the resource pool"},{"transition":"published class","name":"PCI","log":"A PCI device"},{"transition":"published class","name":"PGPU","log":"A physical GPU (pGPU)"},{"transition":"published class","name":"VGPU","log":"A virtual GPU (vGPU)"},{"transition":"published class","name":"VM_appliance","log":"VM appliance"},{"transition":"published field","name":"Bond.mode","log":"The algorithm used to distribute traffic among the bonded NICs"},{"transition":"published field","name":"Bond.primary_slave","log":"The PIF of which the IP configuration and MAC were copied to the bond, and which will receive all configuration/VLANs/VIFs on the bond if the bond is destroyed"},{"transition":"published field","name":"GPU_group.GPU_types","log":"List of GPU types (vendor+device ID) that can be in this group"},{"transition":"published field","name":"GPU_group.PGPUs","log":"List of pGPUs in the group"},{"transition":"published field","name":"GPU_group.VGPUs","log":"List of vGPUs using the group"},{"transition":"published 
field","name":"GPU_group.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"GPU_group.name_label","log":"a human-readable name"},{"transition":"published field","name":"GPU_group.other_config","log":"Additional configuration"},{"transition":"published field","name":"GPU_group.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PCI.class_name","log":"PCI class name"},{"transition":"published field","name":"PCI.dependencies","log":"List of dependent PCI devices"},{"transition":"published field","name":"PCI.device_name","log":"Device name"},{"transition":"published field","name":"PCI.host","log":"Physical machine that owns the PCI device"},{"transition":"published field","name":"PCI.other_config","log":"Additional configuration"},{"transition":"published field","name":"PCI.pci_id","log":"PCI ID of the physical device"},{"transition":"published field","name":"PCI.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PCI.vendor_name","log":"Vendor name"},{"transition":"published field","name":"PGPU.GPU_group","log":"GPU group the pGPU is contained in"},{"transition":"published field","name":"PGPU.PCI","log":"Link to underlying PCI device"},{"transition":"published field","name":"PGPU.host","log":"Host that owns the GPU"},{"transition":"published field","name":"PGPU.other_config","log":"Additional configuration"},{"transition":"published field","name":"PGPU.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"SR.introduced_by","log":"The disaster recovery task which introduced this SR"},{"transition":"published field","name":"VDI.metadata_latest","log":"Whether this VDI contains the latest known accessible metadata for the pool"},{"transition":"published field","name":"VDI.metadata_of_pool","log":"The pool whose metadata is contained in this VDI"},{"transition":"published 
field","name":"VGPU.GPU_group","log":"GPU group used by the vGPU"},{"transition":"published field","name":"VGPU.VM","log":"VM that owns the vGPU"},{"transition":"published field","name":"VGPU.currently_attached","log":"Reflects whether the virtual device is currently connected to a physical device"},{"transition":"published field","name":"VGPU.device","log":"Order in which the devices are plugged into the VM"},{"transition":"published field","name":"VGPU.other_config","log":"Additional configuration"},{"transition":"published field","name":"VGPU.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM.VGPUs","log":"Virtual GPUs"},{"transition":"published field","name":"VM.attached_PCIs","log":"Currently passed-through PCI devices"},{"transition":"published field","name":"VM.order","log":"The point in the startup or shutdown sequence at which this VM will be started"},{"transition":"published field","name":"VM.shutdown_delay","log":"The delay to wait before proceeding to the next order in the shutdown sequence (seconds)"},{"transition":"published field","name":"VM.start_delay","log":"The delay to wait before proceeding to the next order in the startup sequence (seconds)"},{"transition":"published field","name":"VM.suspend_SR","log":"The SR on which a suspend image is stored"},{"transition":"published field","name":"VM.version","log":"The number of times this VM has been recovered"},{"transition":"published field","name":"event.snapshot","log":"The record of the database object that was added, changed or deleted"},{"transition":"published field","name":"host.PCIs","log":"List of PCI devices in the host"},{"transition":"published field","name":"host.PGPUs","log":"List of physical GPUs in the host"},{"transition":"published field","name":"host.chipset_info","log":"Information about chipset features"},{"transition":"published field","name":"pool.metadata_VDIs","log":"The set of currently known metadata VDIs for this 
pool"},{"transition":"published message","name":"Bond.set_mode","log":"Change the bond mode"},{"transition":"published message","name":"DR_task.create","log":"Create a disaster recovery task which will query the supplied list of devices"},{"transition":"published message","name":"DR_task.destroy","log":"Destroy the disaster recovery task, detaching and forgetting any SRs introduced which are no longer required"},{"transition":"published message","name":"GPU_group.create","log":""},{"transition":"published message","name":"GPU_group.destroy","log":""},{"transition":"published message","name":"SR.assert_supports_database_replication","log":"Returns successfully if the given SR supports database replication. Otherwise returns an error to explain why not."},{"transition":"published message","name":"SR.disable_database_replication","log":""},{"transition":"published message","name":"SR.enable_database_replication","log":""},{"transition":"published message","name":"VDI.open_database","log":"Load the metadata found on the supplied VDI and return a session reference which can be used in API calls to query its contents."},{"transition":"published message","name":"VDI.read_database_pool_uuid","log":"Check the VDI cache for the pool UUID of the database on this VDI."},{"transition":"published message","name":"VGPU.create","log":""},{"transition":"published message","name":"VGPU.destroy","log":""},{"transition":"published message","name":"VIF.unplug_force","log":"Forcibly unplug the specified VIF"},{"transition":"published message","name":"VM.assert_can_be_recovered","log":"Assert whether all SRs required to recover this VM are available."},{"transition":"published message","name":"VM.recover","log":"Recover the VM"},{"transition":"published message","name":"VM.set_appliance","log":"Assign this VM to an appliance."},{"transition":"published message","name":"VM.set_order","log":"Set this VM's boot order"},{"transition":"published 
message","name":"VM.set_shutdown_delay","log":"Set this VM's shutdown delay in seconds"},{"transition":"published message","name":"VM.set_start_delay","log":"Set this VM's start delay in seconds"},{"transition":"published message","name":"VM.set_suspend_VDI","log":"Set this VM's suspend VDI, which must be identical to its current one"},{"transition":"published message","name":"VM_appliance.assert_can_be_recovered","log":"Assert whether all SRs required to recover this VM appliance are available."},{"transition":"published message","name":"VM_appliance.clean_shutdown","log":"Perform a clean shutdown of all the VMs in the appliance"},{"transition":"published message","name":"VM_appliance.hard_shutdown","log":"Perform a hard shutdown of all the VMs in the appliance"},{"transition":"published message","name":"VM_appliance.recover","log":"Recover the VM appliance"},{"transition":"published message","name":"VM_appliance.shutdown","log":"For each VM in the appliance, try to shut it down cleanly. If this fails, perform a hard shutdown of the VM."},{"transition":"published message","name":"VM_appliance.start","log":"Start all VMs in the appliance"},{"transition":"published message","name":"event.from","log":"Blocking call which returns a new token and a (possibly empty) batch of events. 
The returned token can be used in subsequent calls to this function."},{"transition":"deprecated field","name":"VM.PCI_bus","log":"Field was never used"},{"transition":"deprecated field","name":"VM.ha_always_run","log":""},{"transition":"deprecated field","name":"event.obj_uuid","log":""},{"transition":"deprecated field","name":"event.timestamp","log":""},{"transition":"deprecated message","name":"VM.set_ha_always_run","log":""},{"transition":"deprecated message","name":"event.next","log":""},{"transition":"deprecated message","name":"event.register","log":""},{"transition":"deprecated message","name":"event.unregister","log":""}],"tampa":[{"transition":"published field","name":"Bond.links_up","log":"Number of links up in this bond"},{"transition":"published field","name":"Bond.properties","log":"Additional configuration properties specific to the bond mode."},{"transition":"published field","name":"PIF.IPv6","log":"IPv6 address"},{"transition":"published field","name":"PIF.ipv6_configuration_mode","log":"Sets if and how this interface gets an IPv6 address"},{"transition":"published field","name":"PIF.ipv6_gateway","log":"IPv6 gateway"},{"transition":"published field","name":"PIF.primary_address_type","log":"Which protocol should define the primary address of this interface"},{"transition":"published field","name":"VIF.ipv4_allowed","log":"A list of IPv4 addresses which can be used to filter traffic passing through this VIF"},{"transition":"published field","name":"VIF.ipv6_allowed","log":"A list of IPv6 addresses which can be used to filter traffic passing through this VIF"},{"transition":"published field","name":"VIF.locking_mode","log":"current locking mode of the VIF"},{"transition":"published field","name":"blob.public","log":"True if the blob is publicly accessible"},{"transition":"published field","name":"host.guest_VCPUs_params","log":"VCPUs params to apply to all resident guests"},{"transition":"published 
field","name":"network.default_locking_mode","log":"The network will use this value to determine the behaviour of all VIFs where locking_mode = default"},{"transition":"published message","name":"Bond.set_property","log":"Set the value of a property of the bond"},{"transition":"published message","name":"PIF.reconfigure_ipv6","log":"Reconfigure the IPv6 address settings for this interface"},{"transition":"published message","name":"PIF.set_primary_address_type","log":"Change the primary address type used by this PIF"},{"transition":"published message","name":"VDI.pool_migrate","log":"Migrate a VDI, which may be attached to a running guest, to a different SR. The destination SR must be visible to the guest."},{"transition":"published message","name":"VIF.add_ipv4_allowed","log":"Associates an IPv4 address with this VIF"},{"transition":"published message","name":"VIF.add_ipv6_allowed","log":"Associates an IPv6 address with this VIF"},{"transition":"published message","name":"VIF.remove_ipv4_allowed","log":"Removes an IPv4 address from this VIF"},{"transition":"published message","name":"VIF.remove_ipv6_allowed","log":"Removes an IPv6 address from this VIF"},{"transition":"published message","name":"VIF.set_ipv4_allowed","log":"Set the IPv4 addresses to which traffic on this VIF can be restricted"},{"transition":"published message","name":"VIF.set_ipv6_allowed","log":"Set the IPv6 addresses to which traffic on this VIF can be restricted"},{"transition":"published message","name":"VIF.set_locking_mode","log":"Set the locking mode for this VIF"},{"transition":"published message","name":"VM.assert_can_migrate","log":"Assert whether a VM can be migrated to the specified destination."},{"transition":"published message","name":"VM.import_convert","log":"Import using a conversion service."},{"transition":"published message","name":"VM.migrate_send","log":"Migrate the VM to another host. 
This can only be called when the specified VM is in the Running state."},{"transition":"published message","name":"VM.query_services","log":"Query the system services advertised by this VM and register them. This can only be applied to a system domain."},{"transition":"published message","name":"event.inject","log":"Injects an artificial event on the given object and returns the corresponding ID in the form of a token, which can be used as a point of reference for database events. For example, to check whether an object has reached the right state before attempting an operation, one can inject an artificial event on the object and wait until the token returned by consecutive event.from calls is lexicographically greater than the one returned by event.inject."},{"transition":"published message","name":"host.get_management_interface","log":"Returns the management interface for the specified host"},{"transition":"published message","name":"host.migrate_receive","log":"Prepare to receive a VM, returning a token which can be passed to VM.migrate."},{"transition":"published message","name":"network.set_default_locking_mode","log":"Set the default locking mode for VIFs attached to this network"},{"transition":"published message","name":"pool_patch.clean_on_host","log":"Removes the patch's files from the specified host"},{"transition":"published message","name":"pool_patch.pool_clean","log":"Removes the patch's files from all hosts in the pool, but does not remove the database entries"},{"transition":"deprecated message","name":"VM.get_cooperative","log":""},{"transition":"deprecated message","name":"host.get_uncooperative_resident_VMs","log":""},{"transition":"removed class","name":"VBD_metrics","log":"Disabled in favour of RRD"},{"transition":"removed class","name":"VIF_metrics","log":"Disabled in favour of RRDs"},{"transition":"removed field","name":"PIF_metrics.io_read_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed 
field","name":"PIF_metrics.io_write_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed field","name":"VBD.metrics","log":"Disabled in favour of RRDs"},{"transition":"removed field","name":"VBD_metrics.io_read_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed field","name":"VBD_metrics.io_write_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed field","name":"VBD_metrics.last_updated","log":"Disabled in favour of RRD"},{"transition":"removed field","name":"VBD_metrics.other_config","log":"Disabled in favour of RRD"},{"transition":"removed field","name":"VIF.metrics","log":"Disabled in favour of RRDs"},{"transition":"removed field","name":"VIF_metrics.io_read_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed field","name":"VIF_metrics.io_write_kbs","log":"Disabled and replaced by RRDs"},{"transition":"removed field","name":"VM_metrics.VCPUs_utilisation","log":"Disabled in favour of RRDs"},{"transition":"removed field","name":"host_metrics.memory_free","log":"Disabled in favour of RRD"}],"clearwater":[{"transition":"published field","name":"SM.features","log":"capabilities of the SM plugin, with capability version numbers"},{"transition":"published field","name":"VM.generation_id","log":"Generation ID of the VM"},{"transition":"published field","name":"session.originator","log":"a key string provided by an API user to distinguish itself from other users sharing the same login name"},{"transition":"published message","name":"VM.shutdown","log":"Attempts to first clean shutdown a VM and if it should fail then perform a hard shutdown on it."},{"transition":"published message","name":"host.declare_dead","log":"Declare that a host is dead. 
This is a dangerous operation, and should only be called if the administrator is absolutely sure the host is definitely dead"},{"transition":"published message","name":"pool.apply_edition","log":"Apply an edition to all hosts in the pool"},{"transition":"published message","name":"pool.get_license_state","log":"This call returns the license state for the pool"},{"transition":"deprecated field","name":"SM.capabilities","log":"Use SM.features instead"},{"transition":"deprecated field","name":"VM.protection_policy","log":"The VMPR feature was removed"},{"transition":"removed class","name":"VMPP","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VM.is_snapshot_from_vmpp","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.VMs","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.alarm_config","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.archive_frequency","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.archive_last_run_time","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.archive_schedule","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.archive_target_config","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.archive_target_type","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.backup_frequency","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.backup_last_run_time","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.backup_retention_value","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.backup_schedule","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.backup_type","log":"The VMPR feature was removed"},{"transition":"removed 
field","name":"VMPP.is_alarm_enabled","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.is_archive_running","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.is_backup_running","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.is_policy_enabled","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.recent_alerts","log":"The VMPR feature was removed"},{"transition":"removed field","name":"VMPP.uuid","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VM.set_protection_policy","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.add_to_alarm_config","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.add_to_archive_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.add_to_archive_target_config","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.add_to_backup_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.archive_now","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.get_alerts","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.protect_now","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.remove_from_alarm_config","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.remove_from_archive_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.remove_from_archive_target_config","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.remove_from_backup_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_alarm_config","log":"The VMPR feature was removed"},{"transition":"removed 
message","name":"VMPP.set_archive_frequency","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_archive_last_run_time","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_archive_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_archive_target_config","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_archive_target_type","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_backup_frequency","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_backup_last_run_time","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_backup_retention_value","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_backup_schedule","log":"The VMPR feature was removed"},{"transition":"removed message","name":"VMPP.set_is_alarm_enabled","log":"The VMPR feature was removed"},{"transition":"removed message","name":"host.license_apply","log":"Free licenses no longer handled by xapi"}],"vgpu-tech-preview":[{"transition":"published class","name":"VGPU_type","log":"A type of virtual GPU"},{"transition":"published field","name":"GPU_group.allocation_algorithm","log":"Current allocation of vGPUs to pGPUs for this group"},{"transition":"published field","name":"PGPU.enabled_VGPU_types","log":"List of VGPU types which have been enabled for this PGPU"},{"transition":"published field","name":"PGPU.resident_VGPUs","log":"List of VGPUs running on this PGPU"},{"transition":"published field","name":"PGPU.supported_VGPU_types","log":"List of VGPU types supported by the underlying hardware"},{"transition":"published field","name":"VGPU.resident_on","log":"The PGPU on which this VGPU is running"},{"transition":"published field","name":"VGPU.type","log":"Preset type for this VGPU"},{"transition":"published 
field","name":"VGPU_type.VGPUs","log":"List of VGPUs of this type"},{"transition":"published field","name":"VGPU_type.enabled_on_PGPUs","log":"List of PGPUs that have this VGPU type enabled"},{"transition":"published field","name":"VGPU_type.framebuffer_size","log":"Framebuffer size of the VGPU type, in bytes"},{"transition":"published field","name":"VGPU_type.max_heads","log":"Maximum number of displays supported by the VGPU type"},{"transition":"published field","name":"VGPU_type.model_name","log":"Model name associated with the VGPU type"},{"transition":"published field","name":"VGPU_type.supported_on_PGPUs","log":"List of PGPUs that support this VGPU type"},{"transition":"published field","name":"VGPU_type.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VGPU_type.vendor_name","log":"Name of VGPU vendor"},{"transition":"published message","name":"GPU_group.get_remaining_capacity","log":""},{"transition":"published message","name":"PGPU.add_enabled_VGPU_types","log":""},{"transition":"published message","name":"PGPU.get_remaining_capacity","log":""},{"transition":"published message","name":"PGPU.remove_enabled_VGPU_types","log":""},{"transition":"published message","name":"PGPU.set_GPU_group","log":""},{"transition":"published message","name":"PGPU.set_enabled_VGPU_types","log":""}],"vgpu-productisation":[{"transition":"published field","name":"GPU_group.enabled_VGPU_types","log":"vGPU types supported on at least one of the pGPUs in this group"},{"transition":"published field","name":"GPU_group.supported_VGPU_types","log":"vGPU types supported on at least one of the pGPUs in this group"},{"transition":"published field","name":"PGPU.supported_VGPU_max_capacities","log":"A map relating each VGPU type supported on this GPU to the maximum number of VGPUs of that type which can run simultaneously on this GPU"},{"transition":"published field","name":"PIF.managed","log":"Indicates whether the interface is managed by xapi. 
If it is not, then xapi will not configure the interface, the commands PIF.plug/unplug/reconfigure_ip(v6) cannot be used, nor can the interface be bonded or have VLANs based on top through xapi."},{"transition":"published field","name":"VGPU_type.enabled_on_GPU_groups","log":"List of GPU groups in which at least one PGPU has this VGPU type enabled"},{"transition":"published field","name":"VGPU_type.max_resolution_x","log":"Maximum resolution (width) supported by the VGPU type"},{"transition":"published field","name":"VGPU_type.max_resolution_y","log":"Maximum resolution (height) supported by the VGPU type"},{"transition":"published field","name":"VGPU_type.supported_on_GPU_groups","log":"List of GPU groups in which at least one PGPU supports this VGPU type"}],"clearwater-felton":[{"transition":"extended message","name":"VDI.copy","log":"The copy can now be performed into a pre-created VDI. It is now possible to request copying only changed blocks from a base VDI"}],"clearwater-whetstone":[{"transition":"published field","name":"PCI.subsystem_device_name","log":"Subsystem device name"},{"transition":"published field","name":"PCI.subsystem_vendor_name","log":"Subsystem vendor name"}],"creedence":[{"transition":"published field","name":"PIF.properties","log":"Additional configuration properties for the interface."},{"transition":"published field","name":"network.assigned_ips","log":"The IP addresses assigned to VIFs on networks that have active xapi-managed DHCP"},{"transition":"published message","name":"PIF.set_property","log":"Set the value of a property of the PIF"},{"transition":"published message","name":"VM.get_SRs_required_for_recovery","log":"List all the SRs that are required for the VM to be recovered"},{"transition":"published message","name":"VM_appliance.get_SRs_required_for_recovery","log":"Get the list of SRs required by the VM appliance to recover."}],"cream":[{"transition":"published field","name":"PGPU.dom0_access","log":"The accessibility of this 
device from dom0"},{"transition":"published field","name":"PGPU.is_system_display_device","log":"Is this device the system display device"},{"transition":"published field","name":"VM.hardware_platform_version","log":"The host virtual hardware platform version the VM can run on"},{"transition":"published field","name":"host.display","log":"indicates whether the host is configured to output its console to a physical display device"},{"transition":"published field","name":"host.virtual_hardware_platform_versions","log":"The set of versions of the virtual hardware platform that the host can offer to its guests"},{"transition":"published message","name":"PGPU.disable_dom0_access","log":""},{"transition":"published message","name":"PGPU.enable_dom0_access","log":""},{"transition":"published message","name":"VM.call_plugin","log":"Call an API plugin on this vm"},{"transition":"published message","name":"host.disable_display","log":"Disable console output to the physical display device next time this host boots"},{"transition":"published message","name":"host.enable_display","log":"Enable console output to the physical display device next time this host boots"}],"indigo":[{"transition":"published message","name":"host.license_add","log":"Functionality for parsing license files re-added"},{"transition":"published message","name":"host.license_remove","log":"Remove any license file from the specified host, and switch that host to the unlicensed edition"}],"dundee":[{"transition":"published class","name":"LVHD","log":"LVHD SR specific operations"},{"transition":"published field","name":"PIF.capabilities","log":"Additional capabilities on the interface."},{"transition":"published field","name":"SM.required_cluster_stack","log":"The storage plugin requires that one of these cluster stacks is configured and running."},{"transition":"published field","name":"SR.clustered","log":"True if the SR is using aggregated local storage"},{"transition":"published 
field","name":"SR.is_tools_sr","log":"True if this is the SR that contains the Tools ISO VDIs"},{"transition":"published field","name":"VDI.is_tools_iso","log":"Whether this VDI is a Tools ISO"},{"transition":"published field","name":"VGPU.scheduled_to_be_resident_on","log":"The PGPU on which this VGPU is scheduled to run"},{"transition":"published field","name":"VGPU_type.experimental","log":"Indicates whether VGPUs of this type should be considered experimental"},{"transition":"published field","name":"VGPU_type.identifier","log":"Key used to identify VGPU types and avoid creating duplicates - this field is used internally and not intended for interpretation by API clients"},{"transition":"published field","name":"VGPU_type.implementation","log":"The internal implementation of this VGPU type"},{"transition":"published field","name":"VIF.ipv4_addresses","log":"IPv4 addresses in CIDR format"},{"transition":"published field","name":"VIF.ipv4_configuration_mode","log":"Determines whether IPv4 addresses are configured on the VIF"},{"transition":"published field","name":"VIF.ipv4_gateway","log":"IPv4 gateway (the empty string means that no gateway is set)"},{"transition":"published field","name":"VIF.ipv6_addresses","log":"IPv6 addresses in CIDR format"},{"transition":"published field","name":"VIF.ipv6_configuration_mode","log":"Determines whether IPv6 addresses are configured on the VIF"},{"transition":"published field","name":"VIF.ipv6_gateway","log":"IPv6 gateway (the empty string means that no gateway is set)"},{"transition":"published field","name":"VM.has_vendor_device","log":"When an HVM guest starts, this controls the presence of the emulated C000 PCI device which triggers Windows Update to fetch or update PV drivers."},{"transition":"published field","name":"VM_guest_metrics.PV_drivers_detected","log":"At least one of the guest's devices has successfully connected to the backend."},{"transition":"published 
field","name":"VM_guest_metrics.can_use_hotplug_vbd","log":"To be used where relevant and available instead of checking PV driver version."},{"transition":"published field","name":"VM_guest_metrics.can_use_hotplug_vif","log":"To be used where relevant and available instead of checking PV driver version."},{"transition":"published field","name":"host.ssl_legacy","log":"Allow SSLv3 protocol and ciphersuites as used by older server versions. This controls both incoming and outgoing connections. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. API login sessions will remain valid."},{"transition":"published field","name":"pool.cpu_info","log":"Details about the physical CPUs on the pool"},{"transition":"published field","name":"pool.guest_agent_config","log":"Pool-wide guest agent configuration information"},{"transition":"published field","name":"pool.ha_cluster_stack","log":"The HA cluster stack that is currently in use. Only valid when HA is enabled."},{"transition":"published field","name":"pool.health_check_config","log":"Configuration for the automatic health check feature"},{"transition":"published field","name":"pool.policy_no_vendor_device","log":"This field was consulted when VM.create did not specify a value for 'has_vendor_device'; VM.create now uses a simple default and no longer consults this value."},{"transition":"published field","name":"task.backtrace","log":"Function call trace for debugging."},{"transition":"published message","name":"LVHD.enable_thin_provisioning","log":"Upgrades an LVHD SR to enable thin-provisioning. Future VDIs created in this SR will be thinly-provisioned, although existing VDIs will be left alone. 
Note that the SR must be attached to the SRmaster for upgrade to work."},{"transition":"published message","name":"SR.forget_data_source_archives","log":"Forget the recorded statistics related to the specified data source"},{"transition":"published message","name":"SR.get_data_sources","log":""},{"transition":"published message","name":"SR.query_data_source","log":"Query the latest value of the specified data source"},{"transition":"published message","name":"SR.record_data_source","log":"Start recording the specified data source"},{"transition":"published message","name":"VIF.configure_ipv4","log":"Configure IPv4 settings for this virtual interface"},{"transition":"published message","name":"VIF.configure_ipv6","log":"Configure IPv6 settings for this virtual interface"},{"transition":"published message","name":"VM.import","log":"Import an XVA from a URI"},{"transition":"published message","name":"VM.set_has_vendor_device","log":"Controls whether, when the VM starts in HVM mode, its virtual hardware will include the emulated PCI device for which drivers may be available through Windows Update. Usually this should never be changed on a VM on which Windows has been installed: changing it on such a VM is likely to lead to a crash on next start."},{"transition":"published message","name":"host.set_ssl_legacy","log":"Enable/disable SSLv3 for interoperability with older server versions. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. API login sessions will remain valid."},{"transition":"published message","name":"pool.add_to_guest_agent_config","log":"Add a key-value pair to the pool-wide guest agent configuration"},{"transition":"published message","name":"pool.disable_ssl_legacy","log":"Sets ssl_legacy false on each host, pool-master last. 
See Host.ssl_legacy and Host.set_ssl_legacy."},{"transition":"published message","name":"pool.has_extension","log":"Return true if the extension is available on the pool"},{"transition":"published message","name":"pool.remove_from_guest_agent_config","log":"Remove a key-value pair from the pool-wide guest agent configuration"},{"transition":"published message","name":"session.create_from_db_file","log":""},{"transition":"deprecated field","name":"VM_guest_metrics.PV_drivers_up_to_date","log":"Deprecated in favour of PV_drivers_detected, and redefined in terms of it"},{"transition":"deprecated message","name":"pool.enable_ssl_legacy","log":"Legacy SSL will soon cease to be supported"},{"transition":"removed message","name":"host.reset_cpu_features","log":"Manual CPU feature setting was removed"},{"transition":"removed message","name":"host.set_cpu_features","log":"Manual CPU feature setting was removed"}],"ely":[{"transition":"published class","name":"PVS_cache_storage","log":"Describes the storage that is available to a PVS site for caching purposes"},{"transition":"published class","name":"PVS_proxy","log":"a proxy connects a VM/VIF with a PVS site"},{"transition":"published class","name":"PVS_server","log":"individual machine serving provisioning (block) data"},{"transition":"published class","name":"PVS_site","log":"machines serving blocks of data for provisioning VMs"},{"transition":"published class","name":"pool_update","log":"Pool-wide updates to the host software"},{"transition":"published field","name":"PVS_cache_storage.SR","log":"SR providing storage for the PVS cache"},{"transition":"published field","name":"PVS_cache_storage.VDI","log":"The VDI used for caching"},{"transition":"published field","name":"PVS_cache_storage.host","log":"The host on which this object defines PVS cache storage"},{"transition":"published field","name":"PVS_cache_storage.site","log":"The PVS_site for which this object defines the storage"},{"transition":"published 
field","name":"PVS_cache_storage.size","log":"The size of the cache VDI (in bytes)"},{"transition":"published field","name":"PVS_cache_storage.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PVS_proxy.VIF","log":"VIF of the VM using the proxy"},{"transition":"published field","name":"PVS_proxy.currently_attached","log":"true = VM is currently proxied"},{"transition":"published field","name":"PVS_proxy.site","log":"PVS site this proxy is part of"},{"transition":"published field","name":"PVS_proxy.status","log":"The run-time status of the proxy"},{"transition":"published field","name":"PVS_proxy.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PVS_server.addresses","log":"IPv4 addresses of this server"},{"transition":"published field","name":"PVS_server.first_port","log":"First UDP port accepted by this server"},{"transition":"published field","name":"PVS_server.last_port","log":"Last UDP port accepted by this server"},{"transition":"published field","name":"PVS_server.site","log":"PVS site this server is part of"},{"transition":"published field","name":"PVS_server.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PVS_site.PVS_uuid","log":"Unique identifier of the PVS site, as configured in PVS"},{"transition":"published field","name":"PVS_site.cache_storage","log":"The SR used by PVS proxy for the cache"},{"transition":"published field","name":"PVS_site.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"PVS_site.name_label","log":"a human-readable name"},{"transition":"published field","name":"PVS_site.proxies","log":"The set of proxies associated with the site"},{"transition":"published field","name":"PVS_site.servers","log":"The set of PVS servers in the site"},{"transition":"published field","name":"PVS_site.uuid","log":"Unique identifier/object 
reference"},{"transition":"published field","name":"VM.reference_label","log":"Textual reference to the template used to create a VM. This can be used by clients in need of an immutable reference to the template since the latter's uuid and name_label may change, for example, after a package installation or upgrade."},{"transition":"published field","name":"VM.requires_reboot","log":"Indicates whether a VM requires a reboot in order to update its configuration, e.g. its memory allocation."},{"transition":"published field","name":"VM_metrics.hvm","log":"hardware virtual machine"},{"transition":"published field","name":"VM_metrics.nested_virt","log":"VM supports nested virtualisation"},{"transition":"published field","name":"VM_metrics.nomigrate","log":"VM is immobile and can't migrate between hosts"},{"transition":"published field","name":"host.control_domain","log":"The control domain (domain 0)"},{"transition":"published field","name":"host.updates","log":"Set of updates"},{"transition":"published field","name":"host.updates_requiring_reboot","log":"List of updates which require reboot"},{"transition":"published field","name":"pool.live_patching_disabled","log":"The pool-wide flag to show if the live patching feature is disabled or not."},{"transition":"published field","name":"pool_patch.pool_update","log":"A reference to the associated pool_update object"},{"transition":"published field","name":"pool_update.after_apply_guidance","log":"What the client should do after this update has been applied."},{"transition":"published field","name":"pool_update.hosts","log":"The hosts that have applied this update."},{"transition":"published field","name":"pool_update.installation_size","log":"Size of the update in bytes"},{"transition":"published field","name":"pool_update.key","log":"GPG key of the update"},{"transition":"published field","name":"pool_update.version","log":"Update version number"},{"transition":"published 
message","name":"PVS_proxy.create","log":"Configure a VM/VIF to use a PVS proxy"},{"transition":"published message","name":"PVS_proxy.destroy","log":"remove (or switch off) a PVS proxy for this VM"},{"transition":"published message","name":"PVS_server.forget","log":"forget a PVS server"},{"transition":"published message","name":"PVS_server.introduce","log":"introduce new PVS server"},{"transition":"published message","name":"PVS_site.forget","log":"Remove a site's meta data"},{"transition":"published message","name":"PVS_site.introduce","log":"Introduce new PVS site"},{"transition":"published message","name":"PVS_site.set_PVS_uuid","log":"Update the PVS UUID of the PVS site"},{"transition":"published message","name":"VIF.move","log":"Move the specified VIF to the specified network, even while the VM is running"},{"transition":"published message","name":"VM.set_memory","log":"Set the memory allocation of this VM. Sets all of memory_static_max, memory_dynamic_min, and memory_dynamic_max to the given value, and leaves memory_static_min untouched."},{"transition":"published message","name":"host.call_extension","log":"Call an API extension on this host"},{"transition":"published message","name":"host.has_extension","log":"Return true if the extension is available on the host"},{"transition":"published message","name":"pool_update.apply","log":"Apply the selected update to a host"},{"transition":"published message","name":"pool_update.destroy","log":"Removes the database entry. 
Only works on unapplied update."},{"transition":"published message","name":"pool_update.introduce","log":"Introduce update VDI"},{"transition":"published message","name":"pool_update.pool_apply","log":"Apply the selected update to all hosts in the pool"},{"transition":"published message","name":"pool_update.pool_clean","log":"Removes the update's files from all hosts in the pool, but does not revert the update"},{"transition":"published message","name":"pool_update.precheck","log":"Execute the precheck stage of the selected update on a host"},{"transition":"changed message","name":"VM.set_VCPUs_number_live","log":"Unless the feature is explicitly enabled for every host in the pool, this fails with Api_errors.license_restriction."},{"transition":"deprecated class","name":"host_patch","log":""},{"transition":"deprecated class","name":"pool_patch","log":""},{"transition":"deprecated field","name":"VDI.parent","log":"The field was never used."},{"transition":"deprecated field","name":"host.patches","log":""},{"transition":"deprecated message","name":"host.refresh_pack_info","log":"Use Pool_update.resync_host instead"},{"transition":"deprecated message","name":"pool_patch.apply","log":""},{"transition":"deprecated message","name":"pool_patch.clean","log":""},{"transition":"deprecated message","name":"pool_patch.clean_on_host","log":""},{"transition":"deprecated message","name":"pool_patch.destroy","log":""},{"transition":"deprecated message","name":"pool_patch.pool_apply","log":""},{"transition":"deprecated message","name":"pool_patch.pool_clean","log":""},{"transition":"deprecated message","name":"pool_patch.precheck","log":""}],"falcon":[{"transition":"published class","name":"Feature","log":"A new piece of functionality"},{"transition":"published class","name":"SDN_controller","log":"Describes the SDN controller that is to connect with the pool"},{"transition":"published class","name":"VMSS","log":"VM Snapshot Schedule"},{"transition":"published 
field","name":"Feature.enabled","log":"Indicates whether the feature is enabled"},{"transition":"published field","name":"Feature.experimental","log":"Indicates whether the feature is experimental (as opposed to stable and fully supported)"},{"transition":"published field","name":"Feature.host","log":"The host where this feature is available"},{"transition":"published field","name":"Feature.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"Feature.version","log":"The version of this feature"},{"transition":"published field","name":"SDN_controller.address","log":"IP address of the controller"},{"transition":"published field","name":"SDN_controller.port","log":"TCP port of the controller"},{"transition":"published field","name":"SDN_controller.protocol","log":"Protocol to connect with SDN controller"},{"transition":"published field","name":"SDN_controller.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VM.is_default_template","log":"Identifies default templates"},{"transition":"published field","name":"VM.is_vmss_snapshot","log":"true if this snapshot was created by the snapshot schedule"},{"transition":"published field","name":"VM.snapshot_schedule","log":"Ref pointing to a snapshot schedule for this VM"},{"transition":"published field","name":"host.features","log":"List of features available on this host"},{"transition":"published field","name":"network.managed","log":"true if the bridge is managed by xapi"},{"transition":"published message","name":"SDN_controller.forget","log":"Remove the OVS manager of the pool and destroy the db record."},{"transition":"published message","name":"SDN_controller.introduce","log":"Introduce an SDN controller to the pool."},{"transition":"published message","name":"VM.set_snapshot_schedule","log":"Set the value of the snapshot schedule field"},{"transition":"published message","name":"VMSS.add_to_schedule","log":""},{"transition":"published 
message","name":"VMSS.remove_from_schedule","log":""},{"transition":"published message","name":"VMSS.set_frequency","log":"Set the value of the frequency field"},{"transition":"published message","name":"VMSS.set_last_run_time","log":""},{"transition":"published message","name":"VMSS.set_retained_snapshots","log":""},{"transition":"published message","name":"VMSS.set_schedule","log":""},{"transition":"published message","name":"VMSS.set_type","log":""},{"transition":"published message","name":"VMSS.snapshot_now","log":"This call executes the snapshot schedule immediately"},{"transition":"published message","name":"task.set_status","log":"Set the task status"},{"transition":"changed field","name":"network.bridge","log":"Added to the constructor (network.create)"},{"transition":"deprecated field","name":"pool.vswitch_controller","log":"Deprecated: set the IP address of the vswitch controller in SDN_controller instead."},{"transition":"deprecated message","name":"pool.set_vswitch_controller","log":"Deprecated: use 'SDN_controller.introduce' and 'SDN_controller.forget' instead."}],"inverness":[{"transition":"published class","name":"PUSB","log":"A physical USB device"},{"transition":"published class","name":"USB_group","log":"A group of compatible USBs across the resource pool"},{"transition":"published class","name":"VUSB","log":"Describes the vusb device"},{"transition":"published class","name":"vdi_nbd_server_info","log":"Details for connecting to a VDI using the Network Block Device protocol"},{"transition":"published field","name":"PGPU.compatibility_metadata","log":"PGPU metadata to determine whether a VGPU can migrate between two PGPUs"},{"transition":"published field","name":"PIF.igmp_snooping_status","log":"The IGMP snooping status of the corresponding network bridge"},{"transition":"published field","name":"PUSB.USB_group","log":"USB group the PUSB is contained in"},{"transition":"published field","name":"PUSB.description","log":"USB device 
description"},{"transition":"published field","name":"PUSB.host","log":"Physical machine that owns the USB device"},{"transition":"published field","name":"PUSB.other_config","log":"additional configuration"},{"transition":"published field","name":"PUSB.passthrough_enabled","log":"enabled for passthrough"},{"transition":"published field","name":"PUSB.path","log":"port path of USB device"},{"transition":"published field","name":"PUSB.product_desc","log":"product description of the USB device"},{"transition":"published field","name":"PUSB.product_id","log":"product id of the USB device"},{"transition":"published field","name":"PUSB.serial","log":"serial of the USB device"},{"transition":"published field","name":"PUSB.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PUSB.vendor_desc","log":"vendor description of the USB device"},{"transition":"published field","name":"PUSB.vendor_id","log":"vendor id of the USB device"},{"transition":"published field","name":"PUSB.version","log":"USB device version"},{"transition":"published field","name":"USB_group.PUSBs","log":"List of PUSBs in the group"},{"transition":"published field","name":"USB_group.VUSBs","log":"List of VUSBs using the group"},{"transition":"published field","name":"USB_group.name_description","log":"a notes field containing human-readable description"},{"transition":"published field","name":"USB_group.name_label","log":"a human-readable name"},{"transition":"published field","name":"USB_group.other_config","log":"Additional configuration"},{"transition":"published field","name":"USB_group.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"VDI.cbt_enabled","log":"True if changed blocks are tracked for this VDI"},{"transition":"published field","name":"VGPU.compatibility_metadata","log":"VGPU metadata to determine whether a VGPU can migrate between two PGPUs"},{"transition":"published field","name":"VUSB.USB_group","log":"USB group 
used by the VUSB"},{"transition":"published field","name":"VUSB.VM","log":"VM that owns the VUSB"},{"transition":"published field","name":"VUSB.other_config","log":"Additional configuration"},{"transition":"published field","name":"VUSB.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"host.PUSBs","log":"List of physical USBs in the host"},{"transition":"published field","name":"network.purpose","log":"Set of purposes for which the server will use this network"},{"transition":"published field","name":"pool.igmp_snooping_enabled","log":"true if IGMP snooping is enabled in the pool, false otherwise."},{"transition":"published field","name":"pool_update.enforce_homogeneity","log":"Flag - if true, all hosts in a pool must apply this update"},{"transition":"published field","name":"pool_update.other_config","log":"additional configuration"},{"transition":"published field","name":"vdi_nbd_server_info.address","log":"An address on which the server can be reached; this can be IPv4, IPv6, or a DNS name."},{"transition":"published field","name":"vdi_nbd_server_info.cert","log":"The TLS certificate of the server"},{"transition":"published field","name":"vdi_nbd_server_info.exportname","log":"The exportname to request over NBD. This holds details including an authentication token, so it must be protected appropriately. Clients should regard the exportname as an opaque string or token."},{"transition":"published field","name":"vdi_nbd_server_info.port","log":"The TCP port"},{"transition":"published field","name":"vdi_nbd_server_info.subject","log":"For convenience, this redundant field holds a DNS (hostname) subject of the certificate. 
This can be a wildcard, but only for a certificate that has a wildcard subject and no concrete hostname subjects."},{"transition":"published message","name":"PUSB.scan","log":""},{"transition":"published message","name":"PUSB.set_passthrough_enabled","log":""},{"transition":"published message","name":"USB_group.create","log":""},{"transition":"published message","name":"USB_group.destroy","log":""},{"transition":"published message","name":"VDI.data_destroy","log":"Delete the data of the snapshot VDI, but keep its changed block tracking metadata. When successful, this call changes the type of the VDI to cbt_metadata. This operation is idempotent: calling it on a VDI of type cbt_metadata results in a no-op, and no error will be thrown."},{"transition":"published message","name":"VDI.disable_cbt","log":"Disable changed block tracking for the VDI. This call is only allowed on VDIs that support enabling CBT. It is an idempotent operation - disabling CBT for a VDI for which CBT is not enabled results in a no-op, and no error will be thrown."},{"transition":"published message","name":"VDI.enable_cbt","log":"Enable changed block tracking for the VDI. This call is idempotent - enabling CBT for a VDI for which CBT is already enabled results in a no-op, and no error will be thrown."},{"transition":"published message","name":"VDI.get_nbd_info","log":"Get details specifying how to access this VDI via a Network Block Device server. For each of a set of NBD server addresses on which the VDI is available, the return value set contains a vdi_nbd_server_info object that contains an exportname to request once the NBD connection is established, and connection details for the address. An empty list is returned if there is no network that has a PIF on a host with access to the relevant SR, or if no such network has been assigned an NBD-related purpose in its purpose field. 
To access the given VDI, any of the vdi_nbd_server_info objects can be used to make a connection to a server, and then the VDI will be available by requesting the exportname."},{"transition":"published message","name":"VDI.list_changed_blocks","log":"Compare two VDIs in 64k block increments and report which blocks differ. This operation is not allowed when vdi_to is attached to a VM."},{"transition":"published message","name":"VM.set_bios_strings","log":"Set custom BIOS strings to this VM. VM will be given a default set of BIOS strings, only some of which can be overridden by the supplied values. Allowed keys are: 'bios-vendor', 'bios-version', 'system-manufacturer', 'system-product-name', 'system-version', 'system-serial-number', 'enclosure-asset-tag', 'baseboard-manufacturer', 'baseboard-product-name', 'baseboard-version', 'baseboard-serial-number', 'baseboard-asset-tag', 'baseboard-location-in-chassis', 'enclosure-asset-tag'"},{"transition":"published message","name":"VUSB.create","log":"Create a new VUSB record in the database only"},{"transition":"published message","name":"VUSB.destroy","log":"Removes a VUSB record from the database"},{"transition":"published message","name":"VUSB.unplug","log":"Unplug the vusb device from the vm."},{"transition":"published message","name":"network.add_purpose","log":"Give a network a new purpose (if not present already)"},{"transition":"published message","name":"network.remove_purpose","log":"Remove a purpose from a network (if present)"},{"transition":"published message","name":"pool.management_reconfigure","log":"Reconfigure the management network interface for all Hosts in the Pool"},{"transition":"published message","name":"pool.set_igmp_snooping_enabled","log":"Enable or disable IGMP Snooping on the pool."},{"transition":"changed message","name":"host.get_server_certificate","log":"Now available to all RBAC roles."},{"transition":"deprecated class","name":"crashdump","log":""},{"transition":"deprecated 
message","name":"VM.get_boot_record","log":"Use the current VM record/fields instead"},{"transition":"removed message","name":"VDI.resize_online","log":"Online VDI resize is not supported by any of the storage backends."}],"jura":[{"transition":"prototyped field","name":"VM.domain_type","log":"Internal-only field; not yet in the public API"},{"transition":"prototyped field","name":"VM_metrics.current_domain_type","log":"Not yet implemented (for future use)"}],"kolkata":[{"transition":"prototyped class","name":"Cluster","log":""},{"transition":"prototyped class","name":"Cluster_host","log":""},{"transition":"prototyped class","name":"probe_result","log":""},{"transition":"prototyped class","name":"sr_stat","log":""},{"transition":"prototyped field","name":"Cluster.cluster_config","log":""},{"transition":"prototyped field","name":"Cluster.cluster_hosts","log":""},{"transition":"prototyped field","name":"Cluster.cluster_stack","log":""},{"transition":"prototyped field","name":"Cluster.cluster_stack_version","log":""},{"transition":"prototyped field","name":"Cluster.cluster_token","log":""},{"transition":"prototyped field","name":"Cluster.other_config","log":""},{"transition":"prototyped field","name":"Cluster.pool_auto_join","log":""},{"transition":"prototyped field","name":"Cluster.token_timeout","log":"the unit is milliseconds"},{"transition":"prototyped field","name":"Cluster.token_timeout_coefficient","log":"the unit is milliseconds"},{"transition":"prototyped field","name":"Cluster.uuid","log":""},{"transition":"prototyped field","name":"Cluster_host.PIF","log":""},{"transition":"prototyped field","name":"Cluster_host.cluster","log":""},{"transition":"prototyped field","name":"Cluster_host.enabled","log":""},{"transition":"prototyped field","name":"Cluster_host.host","log":""},{"transition":"prototyped field","name":"Cluster_host.joined","log":""},{"transition":"prototyped field","name":"Cluster_host.other_config","log":""},{"transition":"prototyped 
field","name":"Cluster_host.uuid","log":""},{"transition":"prototyped field","name":"probe_result.complete","log":""},{"transition":"prototyped field","name":"probe_result.configuration","log":""},{"transition":"prototyped field","name":"probe_result.extra_info","log":""},{"transition":"prototyped field","name":"probe_result.sr","log":""},{"transition":"prototyped field","name":"sr_stat.clustered","log":""},{"transition":"prototyped field","name":"sr_stat.free_space","log":""},{"transition":"prototyped field","name":"sr_stat.health","log":""},{"transition":"prototyped field","name":"sr_stat.name_description","log":""},{"transition":"prototyped field","name":"sr_stat.name_label","log":""},{"transition":"prototyped field","name":"sr_stat.total_space","log":""},{"transition":"prototyped field","name":"sr_stat.uuid","log":""},{"transition":"prototyped message","name":"Cluster.create","log":""},{"transition":"prototyped message","name":"Cluster.destroy","log":""},{"transition":"prototyped message","name":"Cluster.get_network","log":""},{"transition":"prototyped message","name":"Cluster.pool_create","log":""},{"transition":"prototyped message","name":"Cluster.pool_destroy","log":""},{"transition":"prototyped message","name":"Cluster.pool_force_destroy","log":""},{"transition":"prototyped message","name":"Cluster.pool_resync","log":""},{"transition":"prototyped message","name":"Cluster_host.create","log":""},{"transition":"prototyped message","name":"Cluster_host.destroy","log":""},{"transition":"prototyped message","name":"Cluster_host.disable","log":""},{"transition":"prototyped message","name":"Cluster_host.enable","log":""},{"transition":"prototyped message","name":"Cluster_host.force_destroy","log":""},{"transition":"prototyped message","name":"SR.probe_ext","log":""},{"transition":"published class","name":"network_sriov","log":"network-sriov which connects logical pif and physical pif"},{"transition":"published field","name":"PCI.driver_name","log":"Driver 
name"},{"transition":"published field","name":"PIF.PCI","log":"Link to underlying PCI device"},{"transition":"published field","name":"PIF.sriov_logical_PIF_of","log":"Indicates which network_sriov this interface is logical of"},{"transition":"published field","name":"PIF.sriov_physical_PIF_of","log":"Indicates which network_sriov this interface is physical of"},{"transition":"published field","name":"VM.domain_type","log":"The field is now valid"},{"transition":"published field","name":"VM_metrics.current_domain_type","log":"This field now contains valid data"},{"transition":"published field","name":"host.iscsi_iqn","log":"The initiator IQN for the host"},{"transition":"published field","name":"host.multipathing","log":"Specifies whether multipathing is enabled"},{"transition":"published field","name":"network_sriov.configuration_mode","log":"The mode for configure network sriov"},{"transition":"published field","name":"network_sriov.logical_PIF","log":"The logical PIF to connect to the SR-IOV network after enable SR-IOV on the physical PIF"},{"transition":"published field","name":"network_sriov.physical_PIF","log":"The PIF that has SR-IOV enabled"},{"transition":"published field","name":"network_sriov.requires_reboot","log":"Indicates whether the host need to be rebooted before SR-IOV is enabled on the physical PIF"},{"transition":"published message","name":"VM.set_domain_type","log":"Set the VM.domain_type field of the given VM, which will take effect when it is next started"},{"transition":"published message","name":"host.set_iscsi_iqn","log":"Sets the initiator IQN for the host"},{"transition":"published message","name":"host.set_multipathing","log":"Specifies whether multipathing is enabled"},{"transition":"published message","name":"network_sriov.create","log":"Enable SR-IOV on the specific PIF. 
It will create a network-sriov based on the specific PIF and automatically create a logical PIF to connect the specific network."},{"transition":"published message","name":"network_sriov.destroy","log":"Disable SR-IOV on the specific PIF. It will destroy the network-sriov and the logical PIF accordingly."},{"transition":"published message","name":"network_sriov.get_remaining_capacity","log":"Get the number of free SR-IOV VFs on the associated PIF"},{"transition":"deprecated field","name":"VM.HVM_boot_policy","log":"Replaced by VM.domain_type"},{"transition":"deprecated message","name":"VM.set_HVM_boot_policy","log":"Replaced by VM.set_domain_type"}],"lima":[{"transition":"published class","name":"Cluster","log":"Cluster-wide Cluster metadata"},{"transition":"published class","name":"Cluster_host","log":"Cluster member metadata"},{"transition":"published class","name":"probe_result","log":"A set of properties that describe one result element of SR.probe. Result elements and properties can change dynamically based on changes to the the SR.probe input-parameters or the target."},{"transition":"published class","name":"sr_stat","log":"A set of high-level properties associated with an SR."},{"transition":"published field","name":"Cluster.cluster_config","log":"Contains read-only settings for the cluster, such as timeouts and other options. It can only be set at cluster create time"},{"transition":"published field","name":"Cluster.cluster_hosts","log":"A list of the cluster_host objects associated with the Cluster"},{"transition":"published field","name":"Cluster.cluster_stack","log":"Simply the string 'corosync'. No other cluster stacks are currently supported"},{"transition":"published field","name":"Cluster.cluster_stack_version","log":"Version of cluster stack, not writable via the API. 
Defaulting to 2 for backwards compatibility when upgrading from a cluster without this field, which means it is necessarily running version 2 of corosync, the only cluster stack supported so far."},{"transition":"published field","name":"Cluster.cluster_token","log":"The secret key used by xapi-clusterd when it talks to itself on other hosts"},{"transition":"published field","name":"Cluster.other_config","log":"Additional configuration"},{"transition":"published field","name":"Cluster.pending_forget","log":"Internal field used by Host.destroy to store the IP of cluster members marked as permanently dead but not yet removed"},{"transition":"published field","name":"Cluster.pool_auto_join","log":"True if automatically joining new pool members to the cluster. This will be `true` in the first release"},{"transition":"published field","name":"Cluster.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"Cluster_host.PIF","log":"Reference to the PIF object"},{"transition":"published field","name":"Cluster_host.cluster","log":"Reference to the Cluster object"},{"transition":"published field","name":"Cluster_host.enabled","log":"Whether the cluster host believes that clustering should be enabled on this host. This field can be altered by calling the enable/disable message on a cluster host. Only enabled members run the underlying cluster stack. Disabled members are still considered a member of the cluster (see joined), and can be re-enabled by the user."},{"transition":"published field","name":"Cluster_host.host","log":"Reference to the Host object"},{"transition":"published field","name":"Cluster_host.joined","log":"Whether the cluster host has joined the cluster. 
Contrary to enabled, a host that is not joined is not considered a member of the cluster, and hence enable and disable operations cannot be performed on this host."},{"transition":"published field","name":"Cluster_host.other_config","log":"Additional configuration"},{"transition":"published field","name":"Cluster_host.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"probe_result.complete","log":"True if this configuration is complete and can be used to call SR.create. False if it requires further iterative calls to SR.probe, to potentially narrow down on a configuration that can be used."},{"transition":"published field","name":"probe_result.configuration","log":"Plugin-specific configuration which describes where and how to locate the storage repository. This may include the physical block device name, a remote NFS server and path or an RBD storage pool."},{"transition":"published field","name":"probe_result.extra_info","log":"Additional plugin-specific information about this configuration, that might be of use for an API user. This can for example include the LUN or the WWPN."},{"transition":"published field","name":"probe_result.sr","log":"Existing SR found for this configuration"},{"transition":"published field","name":"sr_stat.clustered","log":"Indicates whether the SR uses clustered local storage."},{"transition":"published field","name":"sr_stat.free_space","log":"Number of bytes free on the backing storage (in bytes)"},{"transition":"published field","name":"sr_stat.health","log":"The health status of the SR."},{"transition":"published field","name":"sr_stat.name_description","log":"Longer, human-readable description of the SR. 
Descriptions are generally only displayed by clients when the user is examining SRs in detail."},{"transition":"published field","name":"sr_stat.name_label","log":"Short, human-readable label for the SR."},{"transition":"published field","name":"sr_stat.total_space","log":"Total physical size of the backing storage (in bytes)"},{"transition":"published field","name":"sr_stat.uuid","log":"Uuid that uniquely identifies this SR, if one is available."},{"transition":"published message","name":"Cluster.create","log":"Creates a Cluster object and one Cluster_host object as its first member"},{"transition":"published message","name":"Cluster.destroy","log":"Destroys a Cluster object and the one remaining Cluster_host member"},{"transition":"published message","name":"Cluster.get_network","log":"Returns the network used by the cluster for inter-host communication, i.e. the network shared by all cluster host PIFs"},{"transition":"published message","name":"Cluster.pool_create","log":"Attempt to create a Cluster from the entire pool"},{"transition":"published message","name":"Cluster.pool_destroy","log":"Attempt to destroy the Cluster_host objects for all hosts in the pool and then destroy the Cluster."},{"transition":"published message","name":"Cluster.pool_force_destroy","log":"Attempt to force destroy the Cluster_host objects, and then destroy the Cluster."},{"transition":"published message","name":"Cluster.pool_resync","log":"Resynchronise the cluster_host objects across the pool. Creates them where they need creating and then plugs them"},{"transition":"published message","name":"Cluster_host.create","log":"Add a new host to an existing cluster."},{"transition":"published message","name":"Cluster_host.destroy","log":"Remove the host from an existing cluster. 
This operation is allowed even if a cluster host is not enabled."},{"transition":"published message","name":"Cluster_host.disable","log":"Disable cluster membership for an enabled cluster host."},{"transition":"published message","name":"Cluster_host.enable","log":"Enable cluster membership for a disabled cluster host."},{"transition":"published message","name":"Cluster_host.force_destroy","log":"Remove a host from an existing cluster forcefully."},{"transition":"published message","name":"SR.probe_ext","log":"Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config."},{"transition":"changed field","name":"Cluster.token_timeout","log":"the unit is now seconds"},{"transition":"changed field","name":"Cluster.token_timeout_coefficient","log":"the unit is now seconds"}],"naples":[{"transition":"published field","name":"VM.NVRAM","log":"initial value for guest NVRAM (containing UEFI variables, etc). 
Cannot be changed while the VM is running"},{"transition":"published message","name":"VM.add_to_NVRAM","log":""},{"transition":"published message","name":"VM.remove_from_NVRAM","log":""},{"transition":"published message","name":"VM.set_NVRAM","log":""}],"oslo":[],"quebec":[{"transition":"published field","name":"Bond.auto_update_mac","log":"true if the MAC was taken from the primary slave when the bond was created, and false if the client specified the MAC"},{"transition":"published field","name":"VGPU.PCI","log":"Device passed trough to VM, either as full device or SR-IOV virtual function"},{"transition":"published field","name":"VGPU.extra_args","log":"Extra arguments for vGPU and passed to demu"},{"transition":"published field","name":"VGPU_type.compatible_types_in_vm","log":"List of VGPU types which are compatible in one VM"},{"transition":"published field","name":"host.uefi_certificates","log":"The UEFI certificates allowing Secure Boot"},{"transition":"published field","name":"pool.uefi_certificates","log":"The UEFI certificates allowing Secure Boot"},{"transition":"published message","name":"host.set_uefi_certificates","log":"Sets the UEFI certificates on a host"},{"transition":"changed message","name":"VM.assert_can_boot_here","log":"Does additional compatibility checks when VM powerstate is not halted (e.g. CPUID). 
Use this before calling VM.resume or VM.pool_migrate."},{"transition":"removed message","name":"VM.snapshot_with_quiesce","log":"VSS support has been removed"}],"stockholm":[{"transition":"published class","name":"Certificate","log":"An X509 certificate used for TLS connections"},{"transition":"published field","name":"Certificate.fingerprint","log":"The certificate's SHA256 fingerprint / hash"},{"transition":"published field","name":"Certificate.host","log":"The host where the certificate is installed"},{"transition":"published field","name":"Certificate.not_after","log":"Date before which the certificate is valid"},{"transition":"published field","name":"Certificate.not_before","log":"Date after which the certificate is valid"},{"transition":"published field","name":"Certificate.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"PUSB.speed","log":"USB device speed"},{"transition":"published field","name":"host.certificates","log":"List of certificates installed in the host"},{"transition":"published field","name":"host.editions","log":"List of all available product editions"},{"transition":"published message","name":"host.emergency_reset_server_certificate","log":"Delete the current TLS server certificate and replace by a new, self-signed one. 
This should only be used with extreme care."},{"transition":"published message","name":"host.install_server_certificate","log":"Install the TLS server certificate."},{"transition":"published message","name":"task.set_progress","log":"Set the task progress"},{"transition":"changed message","name":"host.set_power_on_mode","log":"Removed iLO script"},{"transition":"changed message","name":"host.set_ssl_legacy","log":"Legacy SSL no longer supported"},{"transition":"deprecated field","name":"host.ssl_legacy","log":"Legacy SSL no longer supported"},{"transition":"deprecated message","name":"pool.disable_ssl_legacy","log":"Legacy SSL no longer supported"},{"transition":"removed message","name":"pool.enable_ssl_legacy","log":"Legacy SSL no longer supported"}],"stockholm_psr":[{"transition":"published field","name":"pool.is_psr_pending","log":"True if either a PSR is running or we are waiting for a PSR to be re-run"},{"transition":"published message","name":"pool.rotate_secret","log":""}],"nile-preview":[],"nile":[],"1.250.0":[{"transition":"published field","name":"tunnel.protocol","log":"Add protocol field to tunnel"}],"1.257.0":[{"transition":"changed class","name":"VM","log":"possibility to create a VM in suspended mode with a suspend_VDI set"},{"transition":"changed field","name":"VBD.currently_attached","log":"Made StaticRO to allow plugged VIF and VBD creation for Suspended VM"},{"transition":"changed field","name":"VBD.device","log":"Become static to allow plugged VBD creation for Suspended VM"},{"transition":"changed field","name":"VIF.currently_attached","log":"Made StaticRO to allow plugged VIF and VBD creation for Suspended VM"},{"transition":"changed field","name":"VM.last_booted_record","log":"Become static to allow Suspended VM creation"},{"transition":"changed field","name":"VM.power_state","log":"Made StaticRO to allow Suspended VM creation"},{"transition":"changed field","name":"VM.suspend_VDI","log":"Become static to allow Suspended VM 
creation"}],"1.271.0":[{"transition":"published message","name":"host.get_sched_gran","log":"Gets xen's sched-gran on a host"},{"transition":"published message","name":"host.set_sched_gran","log":"Sets xen's sched-gran on a host. See: https://xenbits.xen.org/docs/unstable/misc/xen-command-line.html#sched-gran-x86"}],"1.290.0":[{"transition":"published field","name":"pool.tls_verification_enabled","log":"True iff TLS certificate verification is enabled"},{"transition":"published message","name":"host.emergency_disable_tls_verification","log":"Disable TLS verification for this host only"},{"transition":"published message","name":"host.reset_server_certificate","log":"Delete the current TLS server certificate and replace by a new, self-signed one. This should only be used with extreme care."},{"transition":"published message","name":"pool.enable_tls_verification","log":"Enable TLS server certificate verification"},{"transition":"published message","name":"pool.install_ca_certificate","log":"Install TLS CA certificate"},{"transition":"published message","name":"pool.uninstall_ca_certificate","log":"Uninstall TLS CA certificate"},{"transition":"deprecated field","name":"pool.wlb_verify_cert","log":"Deprecated: to enable TLS verification use Pool.enable_tls_verification instead"},{"transition":"deprecated message","name":"pool.certificate_install","log":"Use Pool.install_ca_certificate instead"},{"transition":"deprecated message","name":"pool.certificate_list","log":"Use openssl to inspect certificate"},{"transition":"deprecated message","name":"pool.certificate_uninstall","log":"Use Pool.uninstall_ca_certificate instead"}],"1.294.0":[{"transition":"published field","name":"Certificate.name","log":"The name of the certificate, only present on certificates of type 'ca'"},{"transition":"published field","name":"Certificate.type","log":"The type of the certificate, either 'ca', 'host' or 'host_internal'"}],"1.297.0":[{"transition":"extended 
message","name":"host.evacuate","log":"Enable migration network selection."}],"1.298.0":[{"transition":"published message","name":"host.emergency_reenable_tls_verification","log":"Reenable TLS verification for this host only"}],"1.301.0":[{"transition":"published class","name":"Repository","log":"Repository for updates"},{"transition":"published field","name":"Repository.binary_url","log":"Base URL of binary packages in this repository"},{"transition":"published field","name":"Repository.hash","log":"SHA256 checksum of latest updateinfo.xml.gz in this repository if its 'update' is true"},{"transition":"published field","name":"Repository.source_url","log":"Base URL of source packages in this repository"},{"transition":"published field","name":"Repository.up_to_date","log":"True if all hosts in pool is up to date with this repository"},{"transition":"published field","name":"Repository.update","log":"True if updateinfo.xml in this repository needs to be parsed"},{"transition":"published field","name":"Repository.uuid","log":"Unique identifier/object reference"},{"transition":"published field","name":"pool.repositories","log":"The set of currently enabled repositories"},{"transition":"published message","name":"Repository.forget","log":"Remove the repository record from the database"},{"transition":"published message","name":"Repository.introduce","log":"Add the configuration for a new repository"},{"transition":"published message","name":"host.apply_updates","log":"apply updates from current enabled repository on a host"},{"transition":"published message","name":"pool.add_repository","log":"Add a repository to the enabled set"},{"transition":"published message","name":"pool.remove_repository","log":"Remove a repository from the enabled set"},{"transition":"published message","name":"pool.set_repositories","log":"Set enabled set of repositories"}],"1.303.0":[{"transition":"published field","name":"VM.pending_guidances","log":"The set of pending mandatory guidances 
after applying updates, which must be applied, as otherwise there may be e.g. VM failures"},{"transition":"published field","name":"host.pending_guidances","log":"The set of pending mandatory guidances after applying updates, which must be applied, as otherwise there may be e.g. VM failures"}],"1.304.0":[{"transition":"published message","name":"pool.check_update_readiness","log":"Check if the pool is ready to be updated. If not, report the reasons."}],"1.307.0":[{"transition":"published message","name":"host.refresh_server_certificate","log":"Replace the internal self-signed host certficate with a new one."}],"1.313.0":[{"transition":"published field","name":"host.tls_verification_enabled","log":"True if this host has TLS verifcation enabled"},{"transition":"extended field","name":"message.cls","log":"Added Certificate class"}],"1.318.0":[{"transition":"published field","name":"pool.client_certificate_auth_enabled","log":"True if authentication by TLS client certificates is enabled"},{"transition":"published field","name":"pool.client_certificate_auth_name","log":"The name (CN/SAN) that an incoming client certificate must have to allow authentication"},{"transition":"published message","name":"pool.disable_client_certificate_auth","log":"Disable client certificate authentication on the pool"},{"transition":"published message","name":"pool.enable_client_certificate_auth","log":"Enable client certificate authentication on the pool"}],"1.329.0":[{"transition":"published message","name":"pool.sync_updates","log":"Sync with the enabled repository"}],"21.2.0":[{"transition":"published field","name":"session.client_certificate","log":"indicates whether this session was authenticated using a client certificate"}],"21.3.0":[{"transition":"published field","name":"pool.repository_proxy_password","log":"Password for the authentication of the proxy used in syncing with the enabled repositories"},{"transition":"published field","name":"pool.repository_proxy_url","log":"Url of 
the proxy used in syncing with the enabled repositories"},{"transition":"published field","name":"pool.repository_proxy_username","log":"Username for the authentication of the proxy used in syncing with the enabled repositories"},{"transition":"published message","name":"pool.configure_repository_proxy","log":"Configure proxy for RPM package repositories."},{"transition":"published message","name":"task.set_error_info","log":"Set the task error info"},{"transition":"published message","name":"task.set_result","log":"Set the task result"}],"21.4.0":[{"transition":"published message","name":"pool.disable_repository_proxy","log":"Disable the proxy for RPM package repositories."}],"22.5.0":[{"transition":"published field","name":"role.is_internal","log":"Indicates whether the role is only to be assigned internally by xapi, or can be used by clients"}],"22.12.0":[{"transition":"prototyped field","name":"Repository.gpgkey_path","log":""},{"transition":"prototyped message","name":"Repository.set_gpgkey_path","log":""}],"22.16.0":[{"transition":"published message","name":"pool.set_uefi_certificates","log":"Set the UEFI certificates for a pool and all its hosts. 
Deprecated: use set_custom_uefi_certificates instead"},{"transition":"changed field","name":"pool.uefi_certificates","log":"Became StaticRO to be editable through new method"},{"transition":"deprecated field","name":"host.uefi_certificates","log":"Use Pool.uefi_certificates instead"},{"transition":"deprecated message","name":"host.set_uefi_certificates","log":"Use Pool.set_uefi_certificates instead"}],"22.19.0":[{"transition":"prototyped message","name":"message.destroy_many","log":""}],"22.20.0":[{"transition":"prototyped field","name":"host.last_software_update","log":""}],"22.26.0":[{"transition":"prototyped class","name":"VTPM","log":""},{"transition":"prototyped field","name":"VTPM.is_protected","log":""},{"transition":"prototyped field","name":"VTPM.is_unique","log":""},{"transition":"prototyped field","name":"VTPM.persistence_backend","log":""},{"transition":"prototyped message","name":"VTPM.create","log":""},{"transition":"prototyped message","name":"VTPM.destroy","log":""}],"22.27.0":[{"transition":"prototyped field","name":"host.https_only","log":""},{"transition":"prototyped message","name":"host.set_https_only","log":""},{"transition":"prototyped message","name":"pool.set_https_only","log":""}],"22.33.0":[{"transition":"prototyped field","name":"pool.migration_compression","log":""}],"22.37.0":[{"transition":"prototyped field","name":"pool.coordinator_bias","log":""}],"23.1.0":[{"transition":"prototyped field","name":"VM.actions_after_softreboot","log":""}],"23.9.0":[{"transition":"prototyped field","name":"pool.telemetry_frequency","log":""},{"transition":"prototyped field","name":"pool.telemetry_next_collection","log":""},{"transition":"prototyped field","name":"pool.telemetry_uuid","log":""},{"transition":"prototyped message","name":"pool.reset_telemetry_uuid","log":""},{"transition":"prototyped message","name":"pool.set_telemetry_next_collection","log":""},{"transition":"changed field","name":"pool.repository_proxy_password","log":"Changed 
internal_only to false"}],"23.14.0":[{"transition":"prototyped class","name":"Observer","log":""},{"transition":"prototyped field","name":"Observer.attributes","log":""},{"transition":"prototyped field","name":"Observer.components","log":""},{"transition":"prototyped field","name":"Observer.enabled","log":""},{"transition":"prototyped field","name":"Observer.endpoints","log":""},{"transition":"prototyped field","name":"Observer.hosts","log":""},{"transition":"prototyped field","name":"Observer.uuid","log":""},{"transition":"prototyped message","name":"Observer.set_attributes","log":""},{"transition":"prototyped message","name":"Observer.set_components","log":""},{"transition":"prototyped message","name":"Observer.set_enabled","log":""},{"transition":"prototyped message","name":"Observer.set_endpoints","log":""},{"transition":"prototyped message","name":"Observer.set_hosts","log":""}],"23.18.0":[{"transition":"prototyped field","name":"host.latest_synced_updates_applied","log":""},{"transition":"prototyped field","name":"pool.last_update_sync","log":""},{"transition":"prototyped field","name":"pool.update_sync_day","log":""},{"transition":"prototyped field","name":"pool.update_sync_enabled","log":""},{"transition":"prototyped field","name":"pool.update_sync_frequency","log":""},{"transition":"prototyped message","name":"host.apply_recommended_guidances","log":""},{"transition":"prototyped message","name":"pool.configure_update_sync","log":""},{"transition":"prototyped message","name":"pool.set_update_sync_enabled","log":""},{"transition":"removed field","name":"Repository.up_to_date","log":"The up_to_date field of repository was removed"}],"23.25.0":[{"transition":"removed message","name":"host.apply_recommended_guidances","log":""}],"23.27.0":[{"transition":"prototyped field","name":"pool.ext_auth_max_threads","log":""},{"transition":"prototyped field","name":"pool.local_auth_max_threads","log":""},{"transition":"prototyped 
message","name":"pool.set_ext_auth_max_threads","log":""},{"transition":"prototyped message","name":"pool.set_local_auth_max_threads","log":""},{"transition":"extended message","name":"host.evacuate","log":"Choose batch size of VM evacuation."}],"23.30.0":[{"transition":"prototyped message","name":"VM.restart_device_models","log":""}],"24.0.0":[{"transition":"prototyped field","name":"host.numa_affinity_policy","log":""},{"transition":"prototyped field","name":"pool.custom_uefi_certificates","log":""},{"transition":"prototyped message","name":"host.set_numa_affinity_policy","log":""},{"transition":"prototyped message","name":"pool.set_custom_uefi_certificates","log":""},{"transition":"deprecated message","name":"pool.set_uefi_certificates","log":"use set_custom_uefi_certificates instead"}],"24.3.0":[{"transition":"prototyped field","name":"Cluster.is_quorate","log":""},{"transition":"prototyped field","name":"Cluster.live_hosts","log":""},{"transition":"prototyped field","name":"Cluster.quorum","log":""},{"transition":"prototyped field","name":"Cluster_host.last_update_live","log":""},{"transition":"prototyped field","name":"Cluster_host.live","log":""}],"24.10.0":[{"transition":"prototyped field","name":"VM.pending_guidances_full","log":""},{"transition":"prototyped field","name":"VM.pending_guidances_recommended","log":""},{"transition":"prototyped field","name":"host.last_update_hash","log":""},{"transition":"prototyped field","name":"host.pending_guidances_full","log":""},{"transition":"prototyped field","name":"host.pending_guidances_recommended","log":""},{"transition":"prototyped message","name":"host.emergency_clear_mandatory_guidance","log":""}],"24.14.0":[{"transition":"prototyped message","name":"PCI.disable_dom0_access","log":""},{"transition":"prototyped message","name":"PCI.enable_dom0_access","log":""},{"transition":"prototyped message","name":"PCI.get_dom0_access_status","log":""},{"transition":"changed 
field","name":"VM.has_vendor_device","log":"New default and not consulting Pool.policy_no_vendor_device"},{"transition":"deprecated field","name":"PGPU.dom0_access","log":"Use PCI.get_dom0_access_status instead."},{"transition":"deprecated field","name":"pool.policy_no_vendor_device","log":"No longer considered by VM.create"},{"transition":"deprecated message","name":"PGPU.disable_dom0_access","log":"Use PCI.disable_dom0_access instead."},{"transition":"deprecated message","name":"PGPU.enable_dom0_access","log":"Use PCI.enable_dom0_access instead."}],"24.15.0":[],"24.16.0":[{"transition":"extended class","name":"sr_stat","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.clustered","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.free_space","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.health","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.name_description","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.name_label","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.total_space","log":"Enum extended with 'unreachable' and 'unavailable' values"},{"transition":"extended field","name":"sr_stat.uuid","log":"Enum extended with 'unreachable' and 'unavailable' values"}]}
diff --git a/doc/data/releases.yml b/doc/data/releases.yml
index b624bf3ce98..803f392f810 100644
--- a/doc/data/releases.yml
+++ b/doc/data/releases.yml
@@ -1,3 +1,5 @@
+24.16.0: XAPI 24.16.0
+24.14.0: XAPI 24.14.0
24.10.0: XAPI 24.10.0
24.3.0: XAPI 24.3.0
24.0.0: XAPI 24.0.0
diff --git a/doc/layouts/xenapi/release.html b/doc/layouts/xenapi/release.html
new file mode 100644
index 00000000000..1d429465808
--- /dev/null
+++ b/doc/layouts/xenapi/release.html
@@ -0,0 +1,32 @@
+{{- partial "header.html" . }}
+
+{{ $r := .Page.Params.release }}
+
+
+{{ index $.Site.Data.releases $r }}
+
+
+Code name: "{{ $r }}".
+
+{{ with index $.Site.Data.release_info $r }}
+
+Changes
+
+
+ Change Element Description
+ {{ range . }}
+
+ {{ .transition | humanize }}
+ {{ .name }}
+ {{ .log }}
+
+ {{ end }}
+
+
+{{ else }}
+
+No changes.
+
+{{ end }}
+
+{{- partial "footer.html" . }}
\ No newline at end of file
diff --git a/doc/make-class-pages.py b/doc/make-class-pages.py
index 5ec84bb3347..630e7dfe192 100755
--- a/doc/make-class-pages.py
+++ b/doc/make-class-pages.py
@@ -5,7 +5,10 @@
xenapi_json = "data/xenapi.json"
classes_root = "content/xen-api/classes/"
-def template(c):
+releases_yml = "data/releases.yml"
+releases_root = "content/xen-api/releases/"
+
+def class_template(c):
return f"""+++
title = "{c}"
layout = "class"
@@ -15,10 +18,28 @@ class = "{c}"
"""
def classes():
- with open(xenapi_json) as f:
- xenapi = json.load(f)
+ with open(xenapi_json, encoding="utf-8") as f_classes:
+ xenapi = json.load(f_classes)
return [c['name'] for c in xenapi]
-for c in classes():
- with open(f"{classes_root}{c.lower()}.md", 'w') as f:
- f.write(template(c))
+for cls in classes():
+ with open(f"{classes_root}{cls.lower()}.md", 'w', encoding="utf-8") as f:
+ f.write(class_template(cls))
+
+def release_template(a, b, w):
+ return f"""+++
+title = "{b}"
+layout = "release"
+type = "xenapi"
+release = "{a}"
+weight = {w}
++++
+"""
+
+def releases():
+ with open(releases_yml, encoding="utf-8") as f_releases:
+ return [l.strip().split(': ') for l in f_releases.readlines()]
+
+for weight, (code_name, title) in enumerate(releases()):
+ with open(f"{releases_root}{code_name.lower()}.md", 'w', encoding="utf-8") as f:
+ f.write(release_template(code_name, title, weight+1))
diff --git a/dune-project b/dune-project
index 87ebda7a964..3240d722d69 100644
--- a/dune-project
+++ b/dune-project
@@ -15,6 +15,20 @@
(name zstd)
)
+
+(package
+ (name clock)
+ (synopsis "Xapi's library for managing time")
+ (authors "Jonathan Ludlam" "Pau Ruiz Safont")
+ (depends
+ (ocaml (>= 4.12))
+ (alcotest :with-test)
+ astring
+ mtime
+ ptime
+ )
+)
+
(package
(name xapi-rrdd-plugin)
)
@@ -451,19 +465,17 @@ This package provides an Lwt compatible interface to the library.")
(package
(name xapi-stdext-date)
(synopsis "Xapi's standard library extension, Dates")
+ (authors "Jonathan Ludlam")
(depends
- (ocaml (>= 4.12))
- (alcotest :with-test)
- astring
- base-unix
+ (clock (= :version))
ptime
- (odoc :with-doc)
)
)
(package
(name xapi-stdext-encodings)
(synopsis "Xapi's standard library extension, Encodings")
+ (authors "Jonathan Ludlam")
(depends
(ocaml (>= 4.13.0))
(alcotest (and (>= 0.6.0) :with-test))
@@ -477,6 +489,7 @@ This package provides an Lwt compatible interface to the library.")
(package
(name xapi-stdext-pervasives)
(synopsis "Xapi's standard library extension, Pervasives")
+ (authors "Jonathan Ludlam")
(depends
(ocaml (>= 4.08))
logs
@@ -498,6 +511,7 @@ This package provides an Lwt compatible interface to the library.")
(package
(name xapi-stdext-threads)
(synopsis "Xapi's standard library extension, Threads")
+ (authors "Jonathan Ludlam")
(depends
ocaml
base-threads
@@ -510,6 +524,7 @@ This package provides an Lwt compatible interface to the library.")
(package
(name xapi-stdext-unix)
(synopsis "Xapi's standard library extension, Unix")
+ (authors "Jonathan Ludlam")
(depends
(ocaml (>= 4.12.0))
base-unix
@@ -524,6 +539,7 @@ This package provides an Lwt compatible interface to the library.")
(package
(name xapi-stdext-zerocheck)
(synopsis "Xapi's standard library extension, Zerocheck")
+ (authors "Jonathan Ludlam")
(depends
ocaml
(odoc :with-doc)
diff --git a/ocaml/forkexecd/lib/fe_argv.ml b/ocaml/forkexecd/lib/fe_argv.ml
index 5c5247ad551..e7f6a5eeb41 100644
--- a/ocaml/forkexecd/lib/fe_argv.ml
+++ b/ocaml/forkexecd/lib/fe_argv.ml
@@ -43,7 +43,7 @@ module Add = struct
let each f xs = xs |> List.map f |> List.concat |> many
let fmt fmt =
- Printf.kprintf (fun str s -> ((), {s with argv= str :: s.argv})) fmt
+ Printf.ksprintf (fun str s -> ((), {s with argv= str :: s.argv})) fmt
let file_descr uuid fd s = ((), {s with fds= (uuid, fd) :: s.fds})
diff --git a/ocaml/forkexecd/lib/forkhelpers.ml b/ocaml/forkexecd/lib/forkhelpers.ml
index ef1ca459f16..7b7fc0b2247 100644
--- a/ocaml/forkexecd/lib/forkhelpers.ml
+++ b/ocaml/forkexecd/lib/forkhelpers.ml
@@ -82,7 +82,7 @@ let waitpid (sock, pid) =
let waitpid_nohang (sock, pid) =
let verbose = false in
if verbose then D.debug "%s pid=%d" __FUNCTION__ pid ;
- let fail fmt = Printf.kprintf failwith fmt in
+ let fail fmt = Printf.ksprintf failwith fmt in
Unix.set_nonblock sock ;
match Fecomms.read_raw_rpc sock with
| Ok Fe.(Finished (WEXITED n)) ->
diff --git a/ocaml/forkexecd/src/child.ml b/ocaml/forkexecd/src/child.ml
index 0bdb5fc1dc1..ef4ad887f31 100644
--- a/ocaml/forkexecd/src/child.ml
+++ b/ocaml/forkexecd/src/child.ml
@@ -1,5 +1,5 @@
let debug (fmt : ('a, unit, string, unit) format4) =
- Printf.kprintf (fun s -> Printf.fprintf stderr "%s\n" s) fmt
+ Printf.ksprintf (fun s -> Printf.fprintf stderr "%s\n" s) fmt
exception Cancelled
diff --git a/ocaml/forkexecd/src/fe_debug.ml b/ocaml/forkexecd/src/fe_debug.ml
index 21bf64c45b6..f1a0df2a17e 100644
--- a/ocaml/forkexecd/src/fe_debug.ml
+++ b/ocaml/forkexecd/src/fe_debug.ml
@@ -18,7 +18,7 @@ let gettimestring () =
let reset () = debug_log := []
let debug (fmt : ('a, unit, string, unit) format4) =
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
debug_log :=
Printf.sprintf "%s|%d|%s\n" (gettimestring ()) (Unix.getpid ()) s
diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml
index 9345bd18313..eca871fa6d5 100644
--- a/ocaml/idl/datamodel.ml
+++ b/ocaml/idl/datamodel.ml
@@ -7866,6 +7866,7 @@ let all_system =
; Datamodel_diagnostics.t
; Datamodel_repository.t
; Datamodel_observer.t
+ ; Datamodel_vm_group.t
]
(* If the relation is one-to-many, the "many" nodes (one edge each) must come before the "one" node (many edges) *)
@@ -7946,6 +7947,7 @@ let all_relations =
; ((_network_sriov, "physical_PIF"), (_pif, "sriov_physical_PIF_of"))
; ((_network_sriov, "logical_PIF"), (_pif, "sriov_logical_PIF_of"))
; ((_certificate, "host"), (_host, "certificates"))
+ ; ((_vm, "groups"), (_vm_group, "VMs"))
]
let update_lifecycles =
@@ -8077,6 +8079,7 @@ let expose_get_all_messages_for =
; _vmpp
; _vmss
; _vm_appliance
+ ; _vm_group
; _pci
; _pgpu
; _gpu_group
diff --git a/ocaml/idl/datamodel_certificate.ml b/ocaml/idl/datamodel_certificate.ml
index ac77887b9f0..409d35e8233 100644
--- a/ocaml/idl/datamodel_certificate.ml
+++ b/ocaml/idl/datamodel_certificate.ml
@@ -64,8 +64,16 @@ let t =
; field ~qualifier:StaticRO ~lifecycle ~ty:DateTime "not_after"
~default_value:(Some (VDateTime Date.never))
"Date before which the certificate is valid"
- ; field ~qualifier:StaticRO ~lifecycle ~ty:String "fingerprint"
+ ; field ~qualifier:StaticRO
+ ~lifecycle:
+ [(Published, rel_stockholm, ""); (Deprecated, "24.19.0", "")]
+ ~ty:String "fingerprint" ~default_value:(Some (VString ""))
+ "Use fingerprint_sha256 instead"
+ ; field ~qualifier:StaticRO ~lifecycle ~ty:String "fingerprint_sha256"
~default_value:(Some (VString ""))
"The certificate's SHA256 fingerprint / hash"
+ ; field ~qualifier:StaticRO ~lifecycle ~ty:String "fingerprint_sha1"
+ ~default_value:(Some (VString ""))
+ "The certificate's SHA1 fingerprint / hash"
]
~messages:[] ()
diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml
index 64e3481dc21..de22cf2e5ad 100644
--- a/ocaml/idl/datamodel_common.ml
+++ b/ocaml/idl/datamodel_common.ml
@@ -10,7 +10,7 @@ open Datamodel_roles
to leave a gap for potential hotfixes needing to increment the schema version.*)
let schema_major_vsn = 5
-let schema_minor_vsn = 778
+let schema_minor_vsn = 779
(* Historical schema versions just in case this is useful later *)
let rio_schema_major_vsn = 5
@@ -205,6 +205,8 @@ let _vm_guest_metrics = "VM_guest_metrics"
let _vm_appliance = "VM_appliance"
+let _vm_group = "VM_group"
+
let _dr_task = "DR_task"
let _vmpp = "VMPP"
diff --git a/ocaml/idl/datamodel_errors.ml b/ocaml/idl/datamodel_errors.ml
index 81dc1e10ed2..04d56597ea8 100644
--- a/ocaml/idl/datamodel_errors.ml
+++ b/ocaml/idl/datamodel_errors.ml
@@ -916,7 +916,10 @@ let _ =
error Api_errors.wlb_timeout ["configured_timeout"]
~doc:"The communication with the WLB server timed out." () ;
error Api_errors.wlb_authentication_failed []
- ~doc:"WLB rejected our configured authentication details." () ;
+ ~doc:
+ "Failed to authenticate with the WLB server, the provided credentials \
+ are invalid."
+ () ;
error Api_errors.wlb_malformed_request []
~doc:"WLB rejected the server's request as malformed." () ;
error Api_errors.wlb_malformed_response
@@ -1963,6 +1966,8 @@ let _ =
error Api_errors.host_evacuation_is_required ["host"]
~doc:"Host evacuation is required before applying updates." () ;
+ error Api_errors.too_many_groups [] ~doc:"VM can only belong to one group." () ;
+
message
(fst Api_messages.ha_pool_overcommitted)
~doc:
diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml
index 11d84ce22e3..4e7336dc2d6 100644
--- a/ocaml/idl/datamodel_pool.ml
+++ b/ocaml/idl/datamodel_pool.ml
@@ -1511,6 +1511,11 @@ let t =
; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:Bool
~default_value:(Some (VBool false)) "update_sync_enabled"
"Whether periodic update synchronization is enabled or not"
+ ; field ~qualifier:DynamicRO ~lifecycle:[]
+ ~ty:(Map (String, String))
+ ~default_value:(Some (VMap [])) "recommendations"
+ "The recommended pool properties for clients to respect for \
+ optimal performance. e.g. max-vm-group=5"
]
)
()
diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml
index 67037dce12a..bf6fe168f8a 100644
--- a/ocaml/idl/datamodel_vm.ml
+++ b/ocaml/idl/datamodel_vm.ml
@@ -1514,6 +1514,16 @@ let set_appliance =
]
~allowed_roles:_R_POOL_OP ()
+let set_groups =
+ call ~name:"set_groups" ~lifecycle:[] ~doc:"Associate this VM with VM groups."
+ ~params:
+ [
+ (Ref _vm, "self", "The VM")
+ ; (Set (Ref _vm_group), "value", "The VM groups to set")
+ ]
+ ~errs:[Api_errors.operation_not_allowed]
+ ~allowed_roles:_R_VM_ADMIN ()
+
let call_plugin =
call ~name:"call_plugin" ~in_product_since:rel_cream
~doc:"Call an API plugin on this vm"
@@ -1887,6 +1897,7 @@ let t =
; recover
; import_convert
; set_appliance
+ ; set_groups
; query_services
; call_plugin
; set_has_vendor_device
@@ -2218,6 +2229,8 @@ let t =
user should follow to make some updates, e.g. specific hardware \
drivers or CPU features, fully effective, but the 'average user' \
doesn't need to"
+ ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:(Set (Ref _vm_group))
+ "groups" "VM groups associated with the VM"
]
)
()
diff --git a/ocaml/idl/datamodel_vm_group.ml b/ocaml/idl/datamodel_vm_group.ml
new file mode 100644
index 00000000000..58016a31d0a
--- /dev/null
+++ b/ocaml/idl/datamodel_vm_group.ml
@@ -0,0 +1,43 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+open Datamodel_types
+open Datamodel_common
+open Datamodel_roles
+
+let placement_policy =
+ Enum
+ ( "placement_policy"
+ , [
+ ("anti_affinity", "Anti-affinity placement policy")
+ ; ("normal", "Default placement policy")
+ ]
+ )
+
+let t =
+ create_obj ~name:_vm_group ~descr:"A VM group" ~doccomments:[]
+ ~gen_constructor_destructor:true ~gen_events:true ~in_db:true ~lifecycle:[]
+ ~persist:PersistEverything ~in_oss_since:None
+ ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[]
+ ~contents:
+ [
+ uid _vm_group
+ ; namespace ~name:"name" ~contents:(names None RW) ()
+ ; field ~qualifier:StaticRO ~lifecycle:[] ~ty:placement_policy "placement"
+ ~default_value:(Some (VEnum "normal"))
+ "The placement policy of the VM group"
+ ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:(Set (Ref _vm)) "VMs"
+ "The list of VMs associated with the group"
+ ]
+ ()
diff --git a/ocaml/idl/dune b/ocaml/idl/dune
index 713462e7ffa..837c3b0013a 100644
--- a/ocaml/idl/dune
+++ b/ocaml/idl/dune
@@ -6,7 +6,7 @@
datamodel_pool datamodel_cluster datamodel_cluster_host dm_api escaping
datamodel_values datamodel_schema datamodel_certificate
datamodel_diagnostics datamodel_repository datamodel_lifecycle
- datamodel_vtpm datamodel_observer)
+ datamodel_vtpm datamodel_observer datamodel_vm_group)
(libraries
ppx_sexp_conv.runtime-lib
rpclib.core
diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml
index 7bdc6f21276..f2ee8fe4be2 100644
--- a/ocaml/idl/schematest.ml
+++ b/ocaml/idl/schematest.ml
@@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex
(* BEWARE: if this changes, check that schema has been bumped accordingly in
ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *)
-let last_known_schema_hash = "6566a4091ecb3200649185730e4f185d"
+let last_known_schema_hash = "efdb1c7e536362523741ccdb7f33f797"
let current_schema_hash : string =
let open Datamodel_types in
diff --git a/ocaml/libs/clock/date.ml b/ocaml/libs/clock/date.ml
new file mode 100644
index 00000000000..a4a43cde623
--- /dev/null
+++ b/ocaml/libs/clock/date.ml
@@ -0,0 +1,170 @@
+(* Copyright (C) Cloud Software Group Inc.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; version 2.1 only. with the special
+ exception on linking described in file LICENSE.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+*)
+
+let months =
+ [|
+ "Jan"
+ ; "Feb"
+ ; "Mar"
+ ; "Apr"
+ ; "May"
+ ; "Jun"
+ ; "Jul"
+ ; "Aug"
+ ; "Sep"
+ ; "Oct"
+ ; "Nov"
+ ; "Dec"
+ |]
+
+let days = [|"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"|]
+
+type print_timezone = Empty | TZ of string
+
+(* we must store the print_type with iso8601 to handle the case where the local time zone is UTC *)
+type t = Ptime.date * Ptime.time * print_timezone
+
+let utc = TZ "Z"
+
+let of_dt print_type dt =
+ let date, time = dt in
+ (date, time, print_type)
+
+let to_dt (date, time, _) = (date, time)
+
+let best_effort_iso8601_to_rfc3339 x =
+ (* (a) add dashes
+ * (b) add UTC tz if no tz provided *)
+ let x =
+ try
+ Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest ->
+ Printf.sprintf "%04d-%02d-%02dT%s" y mon d rest
+ )
+ with _ -> x
+ in
+ let tz =
+ try
+ Scanf.sscanf x "%04d-%02d-%02dT%02d:%02d:%02d%s" (fun _ _ _ _ _ _ tz ->
+ Some tz
+ )
+ with _ -> None
+ in
+ match tz with
+ | None | Some "" ->
+ (* the caller didn't specify a tz. we must try to add one so that ptime can at least attempt to parse *)
+ (Printf.sprintf "%sZ" x, Empty)
+ | Some tz ->
+ (x, TZ tz)
+
+let of_iso8601 x =
+ let rfc3339, print_timezone = best_effort_iso8601_to_rfc3339 x in
+ match Ptime.of_rfc3339 rfc3339 |> Ptime.rfc3339_error_to_msg with
+ | Error _ ->
+ invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x)
+ | Ok (t, tz, _) -> (
+ match tz with
+ | None | Some 0 ->
+ Ptime.to_date_time t |> of_dt print_timezone
+ | Some _ ->
+ invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x)
+ )
+
+let to_rfc3339 ((y, mon, d), ((h, min, s), _), print_type) =
+ match print_type with
+ | TZ tz ->
+ Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz
+ | Empty ->
+ Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i" y mon d h min s
+
+(* Extracted from tondering.dk/claus/cal/chrweek.php#calcdow *)
+let weekday ~year ~mon ~day =
+ let a = (14 - mon) / 12 in
+ let y = year - a in
+ let m = mon + (12 * a) - 2 in
+ (day + y + (y / 4) - (y / 100) + (y / 400) + (31 * m / 12)) mod 7
+
+let to_rfc822 ((year, mon, day), ((h, min, s), _), print_type) =
+ let timezone =
+ match print_type with Empty | TZ "Z" -> "GMT" | TZ tz -> tz
+ in
+ let weekday = weekday ~year ~mon ~day in
+ Printf.sprintf "%s, %d %s %d %02d:%02d:%02d %s" days.(weekday) day
+ months.(mon - 1)
+ year h min s timezone
+
+let to_ptime_t t =
+ match to_dt t |> Ptime.of_date_time with
+ | Some t ->
+ t
+ | None ->
+ let _, (_, offset), _ = t in
+ invalid_arg
+ (Printf.sprintf "%s: dt='%s', offset='%i' is invalid" __FUNCTION__
+ (to_rfc3339 t) offset
+ )
+
+let to_ptime = to_ptime_t
+
+let of_ptime t = Ptime.to_date_time t |> of_dt utc
+
+let of_unix_time s =
+ match Ptime.of_float_s s with
+ | None ->
+ invalid_arg (Printf.sprintf "%s: %f" __FUNCTION__ s)
+ | Some t ->
+ of_ptime t
+
+let to_unix_time t = to_ptime_t t |> Ptime.to_float_s
+
+let _localtime current_tz_offset t =
+ let tz_offset_s = current_tz_offset |> Option.value ~default:0 in
+ let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt Empty in
+ let _, (_, localtime_offset), _ = localtime in
+ if localtime_offset <> tz_offset_s then
+ invalid_arg
+ (Printf.sprintf "%s: offsets don't match. offset='%i', t='%s'"
+ __FUNCTION__ tz_offset_s (Ptime.to_rfc3339 t)
+ ) ;
+ localtime
+
+let _localtime_string current_tz_offset t =
+ _localtime current_tz_offset t |> to_rfc3339
+
+let localtime () =
+ _localtime (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ())
+
+let now () = of_ptime (Ptime_clock.now ())
+
+let epoch = of_ptime Ptime.epoch
+
+let is_earlier ~than t = Ptime.is_earlier ~than:(to_ptime than) (to_ptime t)
+
+let is_later ~than t = Ptime.is_later ~than:(to_ptime than) (to_ptime t)
+
+let diff a b = Ptime.diff (to_ptime a) (to_ptime b)
+
+let compare_print_tz a b =
+ match (a, b) with
+ | Empty, Empty ->
+ 0
+ | TZ a_s, TZ b_s ->
+ String.compare a_s b_s
+ | Empty, TZ _ ->
+ -1
+ | TZ _, Empty ->
+ 1
+
+let compare ((_, _, a_z) as a) ((_, _, b_z) as b) =
+ let ( > ) a b = if a = 0 then b else a in
+ Ptime.compare (to_ptime a) (to_ptime b) > compare_print_tz a_z b_z
+
+let eq x y = compare x y = 0
diff --git a/ocaml/libs/clock/date.mli b/ocaml/libs/clock/date.mli
new file mode 100644
index 00000000000..2a0123813b3
--- /dev/null
+++ b/ocaml/libs/clock/date.mli
@@ -0,0 +1,76 @@
+(*
+ * Copyright (C) 2006-2009 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+(** date-time with support for keeping timezone for ISO 8601 conversion *)
+type t
+
+(** Conversions *)
+
+val of_ptime : Ptime.t -> t
+(** Convert ptime to time in UTC *)
+
+val to_ptime : t -> Ptime.t
+(** Convert date/time to a ptime value: the number of seconds since 00:00:00
+ UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *)
+
+val of_unix_time : float -> t
+(** Convert calendar time [x] (as returned by e.g. Unix.time), to time in UTC *)
+
+val to_unix_time : t -> float
+(** Convert date/time to a unix timestamp: the number of seconds since
+ 00:00:00 UTC, 1 Jan 1970. Assumes the underlying {!t} is in UTC *)
+
+val to_rfc822 : t -> string
+(** Convert date/time to email-formatted (RFC 822) string. *)
+
+val to_rfc3339 : t -> string
+(** Convert date/time to an RFC-3339-formatted string. It also complies with
+ the ISO 8601 format *)
+
+val of_iso8601 : string -> t
+(** Convert ISO 8601 formatted string to a date/time value. Only accepts UTC
+ timestamps: the timezone must be absent, "Z", or a zero offset (+00:00) *)
+
+val epoch : t
+(** 00:00:00 UTC, 1 Jan 1970, in UTC *)
+
+val now : unit -> t
+(** [now ()] returns the current date/time, in UTC *)
+
+val _localtime_string : Ptime.tz_offset_s option -> Ptime.t -> string
+(** exposed for testing *)
+
+val localtime : unit -> t
+(** [localtime ()] returns the current date/time in the local timezone;
+ falls back to UTC when the local timezone offset cannot be determined *)
+
+(** Comparisons *)
+
+val eq : t -> t -> bool
+(** [eq a b] returns whether [a] and [b] are equal *)
+
+val compare : t -> t -> int
+(** [compare a b] returns -1 if [a] is earlier than [b], 1 if [a] is later than
+ [b]; when the timestamps are equal, the timezone printers are compared *)
+
+val is_earlier : than:t -> t -> bool
+(** [is_earlier ~than a] returns whether the timestamp [a] happens before
+ [than] *)
+
+val is_later : than:t -> t -> bool
+(** [is_later ~than a] returns whether the timestamp [a] happens after [than]
+ *)
+
+val diff : t -> t -> Ptime.Span.t
+(** [diff a b] returns the span of time corresponding to [a - b] *)
diff --git a/ocaml/libs/clock/dune b/ocaml/libs/clock/dune
new file mode 100644
index 00000000000..009e2ba7176
--- /dev/null
+++ b/ocaml/libs/clock/dune
@@ -0,0 +1,20 @@
+(library
+ (name clock)
+ (public_name clock)
+ (modules date timer)
+ (libraries
+ astring
+ fmt
+ (re_export mtime)
+ mtime.clock.os
+ (re_export ptime)
+ ptime.clock.os
+ )
+)
+
+(tests
+ (names test_date test_timer)
+ (package clock)
+ (modules test_date test_timer)
+ (libraries alcotest clock fmt mtime ptime qcheck-core qcheck-core.runner)
+)
diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/test.ml b/ocaml/libs/clock/test_date.ml
similarity index 97%
rename from ocaml/libs/xapi-stdext/lib/xapi-stdext-date/test.ml
rename to ocaml/libs/clock/test_date.ml
index c839722d81f..78f673f635c 100644
--- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/test.ml
+++ b/ocaml/libs/clock/test_date.ml
@@ -1,4 +1,4 @@
-open Xapi_stdext_date.Date
+open Clock.Date
let check_float = Alcotest.(check @@ float 1e-2)
@@ -26,8 +26,7 @@ let tests =
(* UTC is valid *)
let non_utc = "2020-12-20T18:10:19+02:00" in
let exn =
- Invalid_argument
- "Xapi_stdext_date__Date.of_iso8601: 2020-12-20T18:10:19+02:00"
+ Invalid_argument "Clock__Date.of_iso8601: 2020-12-20T18:10:19+02:00"
in
Alcotest.check_raises "only UTC is accepted" exn (fun () ->
of_iso8601 non_utc |> ignore
diff --git a/ocaml/libs/clock/test_date.mli b/ocaml/libs/clock/test_date.mli
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ocaml/libs/clock/test_timer.ml b/ocaml/libs/clock/test_timer.ml
new file mode 100644
index 00000000000..2d5e20d7d8a
--- /dev/null
+++ b/ocaml/libs/clock/test_timer.ml
@@ -0,0 +1,233 @@
+module Timer = Clock.Timer
+module Gen = QCheck2.Gen
+module Test = QCheck2.Test
+
+module QCheck_alcotest = struct
+ (* SPDX: BSD-2-Clause
+ From github.com/c-cube/qcheck
+ *)
+
+ module Q = QCheck2
+ module T = QCheck2.Test
+ module Raw = QCheck_base_runner.Raw
+
+ let seed_ =
+ lazy
+ (let s =
+ try int_of_string @@ Sys.getenv "QCHECK_SEED"
+ with _ -> Random.self_init () ; Random.int 1_000_000_000
+ in
+ Printf.printf "qcheck random seed: %d\n%!" s ;
+ s
+ )
+
+ let default_rand () =
+ (* random seed, for repeatability of tests *)
+ Random.State.make [|Lazy.force seed_|]
+
+ let verbose_ =
+ lazy
+ ( match Sys.getenv "QCHECK_VERBOSE" with
+ | "1" | "true" ->
+ true
+ | _ ->
+ false
+ | exception Not_found ->
+ false
+ )
+
+ let long_ =
+ lazy
+ ( match Sys.getenv "QCHECK_LONG" with
+ | "1" | "true" ->
+ true
+ | _ ->
+ false
+ | exception Not_found ->
+ false
+ )
+
+ let to_alcotest ?(colors = false) ?(verbose = Lazy.force verbose_)
+ ?(long = Lazy.force long_) ?(debug_shrink = None) ?debug_shrink_list
+ ?(rand = default_rand ()) (t : T.t) =
+ let (T.Test cell) = t in
+ let handler name cell r =
+ match (r, debug_shrink) with
+ | QCheck2.Test.Shrunk (step, x), Some out ->
+ let go =
+ match debug_shrink_list with
+ | None ->
+ true
+ | Some test_list ->
+ List.mem name test_list
+ in
+ if not go then
+ ()
+ else
+ QCheck_base_runner.debug_shrinking_choices ~colors ~out ~name cell
+ ~step x
+ | _ ->
+ ()
+ in
+ let print = Raw.print_std in
+ let name = T.get_name cell in
+ let run () =
+ let call = Raw.callback ~colors ~verbose ~print_res:true ~print in
+ T.check_cell_exn ~long ~call ~handler ~rand cell
+ in
+ ((name, `Slow, run) : unit Alcotest.test_case)
+end
+
+let spans =
+ Gen.oneofa ([|1; 100; 300|] |> Array.map (fun v -> Mtime.Span.(v * ms)))
+
+let test_timer_remaining =
+ let print = Fmt.to_to_string Mtime.Span.pp in
+ Test.make ~name:"Timer.remaining" ~print spans @@ fun duration ->
+ let timer = Timer.start ~duration in
+ let half = Timer.span_to_s duration /. 2. in
+ let elapsed = Mtime_clock.counter () in
+ Printf.printf "Sleeping for %f seconds...\n" half ;
+ Unix.sleepf half ;
+ let actual = Mtime_clock.count elapsed in
+ (* We expect to have slept [half] seconds, but we could've been woken up later
+ by the OS, it'll never be exact. Check that we're not too far off, or the
+ Expired / Remaining test below will be wrong.
+ The following equation must hold:
+ [duration / 2 <= actual < duration]
+ *)
+ QCheck2.assume (Timer.span_is_shorter actual ~than:duration) ;
+ QCheck2.assume
+ (not (Timer.span_is_shorter Mtime.Span.(2 * actual) ~than:duration)) ;
+ let () =
+ match Timer.remaining timer with
+ | Expired t ->
+ Test.fail_reportf
+ "Expected to have spare time, but got excess: %a. Duration: %a, \
+ actual: %a, timer: %a"
+ Mtime.Span.pp t Mtime.Span.pp duration Mtime.Span.pp actual Timer.pp
+ timer
+ | Remaining t ->
+ if Timer.span_is_longer Mtime.Span.(2 * t) ~than:duration then
+ Test.fail_reportf
+ "Expected to have less than half spare time, but got: %a. \
+ Duration: %a, actual: %a, timer: %a"
+ Mtime.Span.pp t Mtime.Span.pp duration Mtime.Span.pp actual Timer.pp
+ timer
+ in
+
+ (* 3 * half > duration, so we expect Excess to be reported now *)
+ Unix.sleepf (2. *. half) ;
+ let actual = Mtime_clock.count elapsed in
+ QCheck2.assume (Timer.span_is_longer actual ~than:duration) ;
+ let () =
+ match Timer.remaining timer with
+ | Expired _ ->
+ ()
+ | Remaining t ->
+ Test.fail_reportf
+ "Expected to have excess time, but got spare: %a. Duration: %a, \
+ actual: %a, timer: %a"
+ Mtime.Span.pp t Mtime.Span.pp duration Mtime.Span.pp actual Timer.pp
+ timer
+ in
+ if not (Timer.has_expired timer) then
+ Test.fail_reportf "Expected Timer to have expired. Duration: %a, timer: %a"
+ Mtime.Span.pp duration Timer.pp timer ;
+ true
+
+let tests_timer = List.map QCheck_alcotest.to_alcotest [test_timer_remaining]
+
+let combinations =
+ let pair x y = (x, y) in
+ let rec loop acc = function
+ | x :: xs ->
+ let acc = List.map (pair x) xs :: acc in
+ loop acc xs
+ | [] ->
+ List.(concat (rev acc))
+ in
+ loop []
+
+let test_span_compare =
+ let shortest = Mtime.Span.zero in
+ let long = Mtime.Span.of_uint64_ns Int64.max_int in
+ let longest = Mtime.Span.of_uint64_ns (-1L) in
+ let spec = combinations [shortest; long; longest] in
+ let pp_spec () = Fmt.str "%a" (Fmt.Dump.pair Mtime.Span.pp Mtime.Span.pp) in
+ let test_shorter (a, b) () =
+ let ( < ) a b = Mtime.Span.compare a b < 0 in
+ Alcotest.(check bool)
+ "is_shorter doesn't match compare" (a < b)
+ (Timer.span_is_shorter a ~than:b)
+ in
+ let tests_shorter =
+ List.map
+ (fun t ->
+ (Printf.sprintf "is_shorter %a" pp_spec t, `Quick, test_shorter t)
+ )
+ spec
+ in
+ let test_longer (a, b) () =
+ let ( > ) a b = Mtime.Span.compare a b > 0 in
+ Alcotest.(check bool)
+ "is_longer doesn't match compare" (a > b)
+ (Timer.span_is_longer a ~than:b)
+ in
+ let tests_longer =
+ List.map
+ (fun t -> (Printf.sprintf "is_longer %a" pp_spec t, `Quick, test_longer t))
+ spec
+ in
+ List.concat [tests_shorter; tests_longer]
+
+let test_conversion_to_s =
+ let shortest = Mtime.Span.zero in
+ let long = Mtime.Span.(104 * day) in
+ let longer = Mtime.Span.(105 * day) in
+ let spec = [(shortest, 0.); (long, 8.9856e+06); (longer, 9.072e+06)] in
+ let pp_spec () = Fmt.str "%a" Fmt.(Dump.pair Mtime.Span.pp float) in
+ let test_span_to_s (input, expected) () =
+ let actual = Timer.span_to_s input in
+ Alcotest.(check (float Float.epsilon))
+ "seconds match span length" expected actual
+ in
+ List.map
+ (fun t ->
+ (Printf.sprintf "span_to_s %a" pp_spec t, `Quick, test_span_to_s t)
+ )
+ spec
+
+let test_conversion_from_s =
+ let span = Alcotest.testable Mtime.Span.pp Mtime.Span.equal in
+ let shortest = 0. in
+ let short_enough = 9_007_199.254_740_991 in
+ let too_long = 9_007_199.254_740_992 in
+ let neg = -1. in
+ let spec =
+ let open Mtime.Span in
+ [
+ (shortest, Some zero)
+ ; (short_enough, Some (9_007_199_254_740_991 * ns))
+ ; (too_long, None)
+ ; (neg, None)
+ ]
+ in
+ let pp_spec () =
+ Fmt.str "%a" Fmt.(Dump.pair float (Dump.option Mtime.Span.pp))
+ in
+ let test_span_to_s (input, expected) () =
+ let actual = Timer.s_to_span input in
+ Alcotest.(check @@ option span)
+ "span length matches seconds" expected actual
+ in
+ List.map
+ (fun t ->
+ (Printf.sprintf "span_to_s %a" pp_spec t, `Quick, test_span_to_s t)
+ )
+ spec
+
+let tests_span =
+ List.concat [test_conversion_to_s; test_conversion_from_s; test_span_compare]
+
+let () = Alcotest.run "Timer" [("Timer", tests_timer); ("Span", tests_span)]
diff --git a/ocaml/libs/clock/test_timer.mli b/ocaml/libs/clock/test_timer.mli
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ocaml/libs/clock/timer.ml b/ocaml/libs/clock/timer.ml
new file mode 100644
index 00000000000..b4ef6dadaf9
--- /dev/null
+++ b/ocaml/libs/clock/timer.ml
@@ -0,0 +1,54 @@
+type t = {elapsed: Mtime_clock.counter; duration: Mtime.Span.t}
+
+type countdown = Remaining of Mtime.Span.t | Expired of Mtime.Span.t
+
+let span_is_shorter a ~than:b = Mtime.Span.compare a b < 0
+
+let span_is_longer a ~than:b = Mtime.Span.compare a b > 0
+
+let start ~duration = {elapsed= Mtime_clock.counter (); duration}
+
+let duration {duration; _} = duration
+
+let elapsed t = Mtime_clock.count t.elapsed
+
+let remaining t =
+ let elapsed = Mtime_clock.count t.elapsed in
+ let difference = Mtime.Span.abs_diff elapsed t.duration in
+ if span_is_shorter elapsed ~than:t.duration then
+ Remaining difference
+ else
+ Expired difference
+
+let has_expired t =
+ let elapsed = Mtime_clock.count t.elapsed in
+ not (span_is_shorter elapsed ~than:t.duration)
+
+let shorten_by dur t =
+ let duration =
+ if span_is_longer dur ~than:t.duration then
+ Mtime.Span.zero
+ else
+ Mtime.Span.abs_diff dur t.duration
+ in
+ {t with duration}
+
+let extend_by dur t =
+ let duration = Mtime.Span.add dur t.duration in
+ {t with duration}
+
+let pp =
+ let open Fmt in
+ record
+ [
+ field "elapsed" elapsed Mtime.Span.pp
+ ; field "duration" duration Mtime.Span.pp
+ ]
+
+(* Conversion functions *)
+
+(* Rounding errors when there are more than 2^53 nanoseconds, about ~104 days.
+ *)
+let span_to_s span = Mtime.Span.to_float_ns span |> fun ns -> ns *. 1e-9
+
+let s_to_span s = Mtime.Span.of_float_ns (s *. 1e9 |> Float.round)
diff --git a/ocaml/libs/clock/timer.mli b/ocaml/libs/clock/timer.mli
new file mode 100644
index 00000000000..8a60bb89382
--- /dev/null
+++ b/ocaml/libs/clock/timer.mli
@@ -0,0 +1,59 @@
+(** This module is useful for knowing that a set amount of time has passed
+ since a particular moment in time. For example, to know when pasta is
+ cooked al dente. They are meant to be used by polling them. *)
+type t
+
+type countdown = Remaining of Mtime.Span.t | Expired of Mtime.Span.t
+
+val start : duration:Mtime.Span.t -> t
+(** [start ~duration] starts a timer that expires after [duration] has elapsed.
+ The elapsed time is counted in monotonic time, not in POSIX time. *)
+
+val duration : t -> Mtime.Span.t
+(** [duration timer] returns the amount of time after which the timer expires,
+ from the moment it was started. *)
+
+val has_expired : t -> bool
+(** [has_expired timer] returns whether [timer] has reached its duration. *)
+
+val elapsed : t -> Mtime.Span.t
+(** [elapsed timer] returns the amount of time elapsed since [timer] was
+ started. *)
+
+val remaining : t -> countdown
+(** [remaining timer] returns the amount of time left until [timer] expires or
+ the amount of time since it expired. *)
+
+val shorten_by : Mtime.Span.t -> t -> t
+(** [shorten_by amount timer] creates a new timer with the duration of [timer]
+ shortened by [amount]. The starting time doesn't change. The duration of a
+ timer cannot go below 0. When a timer has a duration of 0, it's always
+ considered expired. *)
+
+val extend_by : Mtime.Span.t -> t -> t
+(** [extend_by amount timer] creates a new timer with the duration of [timer]
+ delayed by [amount]. The starting time doesn't change. *)
+
+val pp : t Fmt.t
+(** [pp] pretty-prints the timer. It uses the system clock to calculate
+ the elapsed time every time the timer is printed. *)
+
+(** Mtime.Span helpers *)
+
+val span_is_shorter : Mtime.Span.t -> than:Mtime.Span.t -> bool
+(** [span_is_shorter dur ~than] returns whether [dur] lasts less than [than]. *)
+
+val span_is_longer : Mtime.Span.t -> than:Mtime.Span.t -> bool
+(** [span_is_longer dur ~than] returns whether [dur] lasts more than [than]. *)
+
+val span_to_s : Mtime.Span.t -> float
+(** [span_to_s span] converts a time span into seconds, represented by a float.
+ When the span is longer than ~55 years, rounding errors appear. Avoid
+ whenever possible, this is unavoidable when using Thread.wait functions and
+ related. *)
+
+val s_to_span : float -> Mtime.Span.t option
+(** [s_to_span seconds] converts a float representing seconds to a timespan.
+ Returns None when [seconds] is negative, is not a number or larger than
+    ~104 days. Avoid whenever possible, some RPC functions already use this so
+ it needs to be available. *)
diff --git a/ocaml/libs/log/debug.ml b/ocaml/libs/log/debug.ml
index b4a5721b9e3..5e63bc2b008 100644
--- a/ocaml/libs/log/debug.ml
+++ b/ocaml/libs/log/debug.ml
@@ -318,7 +318,7 @@ functor
->
struct
let output level priority (fmt : ('a, unit, string, 'b) format4) =
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
if not (is_disabled Brand.name level) then
output_log Brand.name level priority s
@@ -336,7 +336,7 @@ functor
let critical fmt = output Syslog.Crit "critical" fmt
let audit ?(raw = false) (fmt : ('a, unit, string, 'b) format4) =
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
let msg = if raw then s else format true Brand.name "audit" s in
Syslog.log Syslog.Local6 Syslog.Info (escape msg) ;
@@ -352,3 +352,5 @@ functor
try f ()
with e -> log_backtrace_internal ~level:Syslog.Debug ~msg:"debug" e ()
end
+
+module Pp = struct let mtime_span () = Fmt.str "%a" Mtime.Span.pp end
diff --git a/ocaml/libs/log/debug.mli b/ocaml/libs/log/debug.mli
index af1b214b2fe..f6301c3d587 100644
--- a/ocaml/libs/log/debug.mli
+++ b/ocaml/libs/log/debug.mli
@@ -88,3 +88,7 @@ module Make : functor (_ : BRAND) -> DEBUG
val is_disabled : string -> Syslog.level -> bool
(** [is_disabled brand level] returns [true] if logging for [brand] at [level]
is disabled, * otherwise returns [false]. *)
+
+module Pp : sig
+ val mtime_span : unit -> Mtime.Span.t -> string
+end
diff --git a/ocaml/libs/log/dune b/ocaml/libs/log/dune
index b8b637e7bf5..fdfd739d082 100644
--- a/ocaml/libs/log/dune
+++ b/ocaml/libs/log/dune
@@ -6,6 +6,8 @@
(names syslog_stubs))
(libraries
astring
+ fmt
+ mtime
logs
threads.posix
xapi-backtrace
diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml
index 77f3994fe68..45e9bba5efb 100644
--- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml
+++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/date.ml
@@ -12,163 +12,7 @@
* GNU Lesser General Public License for more details.
*)
-let months =
- [|
- "Jan"
- ; "Feb"
- ; "Mar"
- ; "Apr"
- ; "May"
- ; "Jun"
- ; "Jul"
- ; "Aug"
- ; "Sep"
- ; "Oct"
- ; "Nov"
- ; "Dec"
- |]
-
-let days = [|"Sun"; "Mon"; "Tue"; "Wed"; "Thu"; "Fri"; "Sat"|]
-
-type print_timezone = Empty | TZ of string
-
-(* we must store the print_type with iso8601 to handle the case where the local time zone is UTC *)
-type t = Ptime.date * Ptime.time * print_timezone
-
-let utc = TZ "Z"
-
-let of_dt print_type dt =
- let date, time = dt in
- (date, time, print_type)
-
-let to_dt (date, time, _) = (date, time)
-
-let best_effort_iso8601_to_rfc3339 x =
- (* (a) add dashes
- * (b) add UTC tz if no tz provided *)
- let x =
- try
- Scanf.sscanf x "%04d%02d%02dT%s" (fun y mon d rest ->
- Printf.sprintf "%04d-%02d-%02dT%s" y mon d rest
- )
- with _ -> x
- in
- let tz =
- try
- Scanf.sscanf x "%04d-%02d-%02dT%02d:%02d:%02d%s" (fun _ _ _ _ _ _ tz ->
- Some tz
- )
- with _ -> None
- in
- match tz with
- | None | Some "" ->
- (* the caller didn't specify a tz. we must try to add one so that ptime can at least attempt to parse *)
- (Printf.sprintf "%sZ" x, Empty)
- | Some tz ->
- (x, TZ tz)
-
-let of_iso8601 x =
- let rfc3339, print_timezone = best_effort_iso8601_to_rfc3339 x in
- match Ptime.of_rfc3339 rfc3339 |> Ptime.rfc3339_error_to_msg with
- | Error _ ->
- invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x)
- | Ok (t, tz, _) -> (
- match tz with
- | None | Some 0 ->
- Ptime.to_date_time t |> of_dt print_timezone
- | Some _ ->
- invalid_arg (Printf.sprintf "%s: %s" __FUNCTION__ x)
- )
-
-let to_rfc3339 ((y, mon, d), ((h, min, s), _), print_type) =
- match print_type with
- | TZ tz ->
- Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i%s" y mon d h min s tz
- | Empty ->
- Printf.sprintf "%04i%02i%02iT%02i:%02i:%02i" y mon d h min s
-
-let weekday ~year ~mon ~day =
- let a = (14 - mon) / 12 in
- let y = year - a in
- let m = mon + (12 * a) - 2 in
- (day + y + (y / 4) - (y / 100) + (y / 400) + (31 * m / 12)) mod 7
-
-let to_rfc822 ((year, mon, day), ((h, min, s), _), print_type) =
- let timezone =
- match print_type with Empty | TZ "Z" -> "GMT" | TZ tz -> tz
- in
- let weekday = weekday ~year ~mon ~day in
- Printf.sprintf "%s, %d %s %d %02d:%02d:%02d %s" days.(weekday) day
- months.(mon - 1)
- year h min s timezone
-
-let to_ptime_t t =
- match to_dt t |> Ptime.of_date_time with
- | Some t ->
- t
- | None ->
- let _, (_, offset), _ = t in
- invalid_arg
- (Printf.sprintf "%s: dt='%s', offset='%i' is invalid" __FUNCTION__
- (to_rfc3339 t) offset
- )
-
-let to_ptime = to_ptime_t
-
-let of_ptime t = Ptime.to_date_time t |> of_dt utc
-
-let of_unix_time s =
- match Ptime.of_float_s s with
- | None ->
- invalid_arg (Printf.sprintf "%s: %f" __FUNCTION__ s)
- | Some t ->
- of_ptime t
-
-let to_unix_time t = to_ptime_t t |> Ptime.to_float_s
-
-let _localtime current_tz_offset t =
- let tz_offset_s = current_tz_offset |> Option.value ~default:0 in
- let localtime = t |> Ptime.to_date_time ~tz_offset_s |> of_dt Empty in
- let _, (_, localtime_offset), _ = localtime in
- if localtime_offset <> tz_offset_s then
- invalid_arg
- (Printf.sprintf "%s: offsets don't match. offset='%i', t='%s'"
- __FUNCTION__ tz_offset_s (Ptime.to_rfc3339 t)
- ) ;
- localtime
-
-let _localtime_string current_tz_offset t =
- _localtime current_tz_offset t |> to_rfc3339
-
-let localtime () =
- _localtime (Ptime_clock.current_tz_offset_s ()) (Ptime_clock.now ())
-
-let now () = of_ptime (Ptime_clock.now ())
-
-let epoch = of_ptime Ptime.epoch
-
-let is_earlier ~than t = Ptime.is_earlier ~than:(to_ptime than) (to_ptime t)
-
-let is_later ~than t = Ptime.is_later ~than:(to_ptime than) (to_ptime t)
-
-let diff a b = Ptime.diff (to_ptime a) (to_ptime b)
-
-let compare_print_tz a b =
- match (a, b) with
- | Empty, Empty ->
- 0
- | TZ a_s, TZ b_s ->
- String.compare a_s b_s
- | Empty, TZ _ ->
- -1
- | TZ _, Empty ->
- 1
-
-let compare ((_, _, a_z) as a) ((_, _, b_z) as b) =
- let ( > ) a b = if a = 0 then b else a in
- Ptime.compare (to_ptime a) (to_ptime b) > compare_print_tz a_z b_z
-
-let eq x y = compare x y = 0
+include Clock.Date
let never = epoch
diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/dune b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/dune
index c2ed6c448da..8566d86e12c 100644
--- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/dune
+++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-date/dune
@@ -1,16 +1,6 @@
(library
(name xapi_stdext_date)
(public_name xapi-stdext-date)
- (modules :standard \ test)
- (libraries astring
- ptime
- ptime.clock.os
- unix)
-)
-
-(test
- (name test)
- (package xapi-stdext-date)
- (modules test)
- (libraries alcotest xapi-stdext-date ptime)
+ (modules :standard)
+ (libraries clock ptime)
)
diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-encodings/encodings.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-encodings/encodings.ml
index 8d6d07e012a..2dfd45a7d18 100644
--- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-encodings/encodings.ml
+++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-encodings/encodings.ml
@@ -48,7 +48,7 @@ end
(* === UCS Validators === *)
module type UCS_VALIDATOR = sig
- val validate : Uchar.t -> unit [@@inline]
+ val validate : Uchar.t -> unit
end
module UTF8_UCS_validator = struct
diff --git a/ocaml/mpathalert/mpathalert.ml b/ocaml/mpathalert/mpathalert.ml
index 1ad0daedcec..c236f602702 100644
--- a/ocaml/mpathalert/mpathalert.ml
+++ b/ocaml/mpathalert/mpathalert.ml
@@ -37,7 +37,7 @@ let stdout_m = Mutex.create ()
let debug (fmt : ('a, unit, string, unit) format4) =
if !print_debug then
with_lock stdout_m (fun () ->
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
Printf.printf "%s [%d] %s\n"
(time_of_float (Unix.gettimeofday ()))
@@ -48,7 +48,7 @@ let debug (fmt : ('a, unit, string, unit) format4) =
fmt
)
else
- Printf.kprintf (Fun.const ()) fmt
+ Printf.ksprintf (Fun.const ()) fmt
type t = {
host: [`host] Uuidx.t
diff --git a/ocaml/perftest/perfdebug.ml b/ocaml/perftest/perfdebug.ml
index c7bdb0e03e7..4c71c8e8ce1 100644
--- a/ocaml/perftest/perfdebug.ml
+++ b/ocaml/perftest/perfdebug.ml
@@ -15,7 +15,7 @@ let stdout_m = Mutex.create ()
let debug ?(out = stdout) (fmt : ('a, unit, string, unit) format4) =
Xapi_stdext_threads.Threadext.Mutex.execute stdout_m (fun () ->
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
Printf.fprintf out "%s\n" s ;
flush stdout
diff --git a/ocaml/rrd2csv/src/rrd2csv.ml b/ocaml/rrd2csv/src/rrd2csv.ml
index bc6022584bd..4e36e581e5b 100644
--- a/ocaml/rrd2csv/src/rrd2csv.ml
+++ b/ocaml/rrd2csv/src/rrd2csv.ml
@@ -37,7 +37,7 @@ module Stdout = struct
let debug (fmt : ('a, unit, string, unit) format4) =
if !print_debug then
Xapi_stdext_threads.Threadext.Mutex.execute stdout_m (fun () ->
- Printf.kprintf
+ Printf.ksprintf
(fun s ->
Printf.printf "%s [%d] %s\n"
(time_of_float (Unix.gettimeofday ()))
@@ -48,7 +48,7 @@ module Stdout = struct
fmt
)
else
- Printf.kprintf (fun _ -> ()) fmt
+ Printf.ksprintf (fun _ -> ()) fmt
let string_of_float flt =
if fst (modf flt) = 0. then
diff --git a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/JsonRpcClient.java b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/JsonRpcClient.java
index b77cd815fb5..20d6e5efc8e 100644
--- a/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/JsonRpcClient.java
+++ b/ocaml/sdk-gen/java/autogen/xen-api/src/main/java/com/xensource/xenapi/JsonRpcClient.java
@@ -32,6 +32,7 @@
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.json.JsonReadFeature;
import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import org.apache.hc.client5.http.classic.methods.HttpPost;
@@ -214,6 +215,7 @@ private void initializeObjectMapperConfiguration() {
var dateHandlerModule = new SimpleModule("DateHandler");
dateHandlerModule.addDeserializer(Date.class, new CustomDateDeserializer());
this.objectMapper.enable(JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS.mappedFeature());
+ this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
this.objectMapper.registerModule(dateHandlerModule);
}
diff --git a/ocaml/tests/common/test_common.ml b/ocaml/tests/common/test_common.ml
index 6cf72c77ef8..90dfe287801 100644
--- a/ocaml/tests/common/test_common.ml
+++ b/ocaml/tests/common/test_common.ml
@@ -298,7 +298,8 @@ let make_pool ~__context ~master ?(name_label = "") ?(name_description = "")
?(telemetry_uuid = Ref.null) ?(telemetry_frequency = `weekly)
?(telemetry_next_collection = API.Date.never)
?(last_update_sync = API.Date.epoch) ?(update_sync_frequency = `daily)
- ?(update_sync_day = 0L) ?(update_sync_enabled = false) () =
+ ?(update_sync_day = 0L) ?(update_sync_enabled = false)
+ ?(recommendations = []) () =
let pool_ref = Ref.make () in
Db.Pool.create ~__context ~ref:pool_ref ~uuid:(make_uuid ()) ~name_label
~name_description ~master ~default_SR ~suspend_image_SR ~crash_dump_SR
@@ -316,7 +317,7 @@ let make_pool ~__context ~master ?(name_label = "") ?(name_description = "")
~migration_compression ~coordinator_bias ~telemetry_uuid
~telemetry_frequency ~telemetry_next_collection ~last_update_sync
~local_auth_max_threads:8L ~ext_auth_max_threads:8L ~update_sync_frequency
- ~update_sync_day ~update_sync_enabled ;
+ ~update_sync_day ~update_sync_enabled ~recommendations ;
pool_ref
let default_sm_features =
@@ -675,3 +676,10 @@ let make_observer ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ())
Db.Observer.create ~__context ~ref ~uuid ~name_label ~name_description ~hosts
~attributes ~endpoints ~components ~enabled ;
ref
+
+let make_vm_group ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ())
+ ?(name_label = "vm_group") ?(name_description = "") ?(placement = `normal)
+ () =
+ Db.VM_group.create ~__context ~ref ~uuid ~name_label ~name_description
+ ~placement ;
+ ref
diff --git a/ocaml/tests/suite_alcotest.ml b/ocaml/tests/suite_alcotest.ml
index 21a637d5ea7..be73d7cef06 100644
--- a/ocaml/tests/suite_alcotest.ml
+++ b/ocaml/tests/suite_alcotest.ml
@@ -46,6 +46,7 @@ let () =
; ("Test_storage_migrate_state", Test_storage_migrate_state.test)
; ("Test_bios_strings", Test_bios_strings.test)
; ("Test_certificates", Test_certificates.test)
+ ; ("Test_vm_group", Test_vm_group.test)
]
@ Test_guest_agent.tests
@ Test_nm.tests
diff --git a/ocaml/tests/test_ha_vm_failover.ml b/ocaml/tests/test_ha_vm_failover.ml
index 4ab377870ab..fe915563e18 100644
--- a/ocaml/tests/test_ha_vm_failover.ml
+++ b/ocaml/tests/test_ha_vm_failover.ml
@@ -27,6 +27,10 @@ type vbd = {agile: bool}
type vif = {agile: bool}
+type placement_policy = AntiAffinity | Normal
+
+type group = {name_label: string; placement: placement_policy}
+
type vm = {
ha_always_run: bool
; ha_restart_priority: string
@@ -34,6 +38,8 @@ type vm = {
; name_label: string
; vbds: vbd list
; vifs: vif list
+ ; groups: group list
+ ; power_state: string
}
let basic_vm =
@@ -44,6 +50,8 @@ let basic_vm =
; name_label= "vm"
; vbds= [{agile= true}]
; vifs= [{agile= true}]
+ ; groups= []
+ ; power_state= "running"
}
type host = {memory_total: int64; name_label: string; vms: vm list}
@@ -55,8 +63,67 @@ type pool = {
; cluster: int
}
-let string_of_vm {memory; name_label; _} =
- Printf.sprintf "{memory = %Ld; name_label = %S}" memory name_label
+let master = "master"
+
+let slave = "slave"
+
+let slave1 = "slave1"
+
+let slave2 = "slave2"
+
+let slave3 = "slave3"
+
+let grp1 = "grp1"
+
+let grp2 = "grp2"
+
+(** vmX_grpY: in test case for anti_affinity, the VM is the Xth smallest of slave1's VMs of
+    the same placement type in terms of VM's memory size, and it belongs to VM group: grpY. *)
+let vm1_grp1 = "vm1_grp1"
+
+let vm2_grp1 = "vm2_grp1"
+
+let vm3_grp1 = "vm3_grp1"
+
+let vm4_grp1 = "vm4_grp1"
+
+let vm5_grp1 = "vm5_grp1"
+
+let vm6_grp1 = "vm6_grp1"
+
+let vm8_grp1 = "vm8_grp1"
+
+let vm2_grp2 = "vm2_grp2"
+
+let vm3_grp2 = "vm3_grp2"
+
+let vm4_grp2 = "vm4_grp2"
+
+let vm5_grp2 = "vm5_grp2"
+
+let vm7_grp2 = "vm7_grp2"
+
+(** In test case for anti_affinity, it is a VM resident on a host other than slave1 *)
+let vm_grp1 = "vm_grp1"
+
+(** vmX: in test case for anti_affinity, it is a VM not in any VM group, and it is the Xth
+ largest of slave1's VMs not in any VM group in terms of VM's memory size. *)
+let vm1 = "vm1"
+
+let vm2 = "vm2"
+
+let vm3 = "vm3"
+
+let vm4 = "vm4"
+
+let string_of_group {name_label; placement} =
+ Printf.sprintf "{name_label = %S; placement = %S}" name_label
+ (match placement with AntiAffinity -> "anti_affinity" | Normal -> "normal")
+
+let string_of_vm {memory; name_label; groups; _} =
+ Printf.sprintf "{memory = %Ld; name_label = %S; groups = [%s]}" memory
+ name_label
+ (Test_printers.list string_of_group groups)
let string_of_host {memory_total; name_label; vms} =
Printf.sprintf "{memory_total = %Ld; name_label = %S; vms = [%s]}"
@@ -71,6 +138,26 @@ let string_of_pool {master; slaves; ha_host_failures_to_tolerate; cluster} =
(Test_printers.list string_of_host slaves)
ha_host_failures_to_tolerate cluster
+let load_group ~__context ~group =
+ let placement =
+ match group.placement with
+ | AntiAffinity ->
+ `anti_affinity
+ | Normal ->
+ `normal
+ in
+ match
+ Db.VM_group.get_all ~__context
+ |> List.find_opt (fun g ->
+ Db.VM_group.get_name_label ~__context ~self:g = group.name_label
+ && Db.VM_group.get_placement ~__context ~self:g = placement
+ )
+ with
+ | None ->
+ make_vm_group ~__context ~name_label:group.name_label ~placement ()
+ | Some g ->
+ g
+
let load_vm ~__context ~(vm : vm) ~local_sr ~shared_sr ~local_net ~shared_net =
let vm_ref =
make_vm ~__context ~ha_always_run:vm.ha_always_run
@@ -98,6 +185,14 @@ let load_vm ~__context ~(vm : vm) ~local_sr ~shared_sr ~local_net ~shared_net =
)
vm.vbds
in
+ let groups =
+ List.fold_left
+ (fun acc group -> load_group ~__context ~group :: acc)
+ [] vm.groups
+ in
+ Db.VM.set_groups ~__context ~self:vm_ref ~value:groups ;
+ if "running" = vm.power_state then
+ Db.VM.set_power_state ~__context ~self:vm_ref ~value:`Running ;
vm_ref
let load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net =
@@ -110,7 +205,11 @@ let load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net =
let (_ : API.ref_VM list) =
List.map
(fun vm ->
- load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net
+ let vm_ref =
+ load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net
+ in
+ Db.VM.set_resident_on ~__context ~self:vm_ref ~value:host_ref ;
+ vm_ref
)
host.vms
in
@@ -184,7 +283,7 @@ module AllProtectedVms = Generic.MakeStateful (struct
[
(* No VMs and a single host. *)
( {
- master= {memory_total= gib 256L; name_label= "master"; vms= []}
+ master= {memory_total= gib 256L; name_label= master; vms= []}
; slaves= []
; ha_host_failures_to_tolerate= 0L
; cluster= 0
@@ -196,7 +295,7 @@ module AllProtectedVms = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
{basic_vm with ha_always_run= false; ha_restart_priority= ""}
@@ -213,7 +312,7 @@ module AllProtectedVms = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms= [{basic_vm with ha_always_run= false}]
}
; slaves= []
@@ -224,8 +323,7 @@ module AllProtectedVms = Generic.MakeStateful (struct
)
; (* One protected VM. *)
( {
- master=
- {memory_total= gib 256L; name_label= "master"; vms= [basic_vm]}
+ master= {memory_total= gib 256L; name_label= master; vms= [basic_vm]}
; slaves= []
; ha_host_failures_to_tolerate= 0L
; cluster= 0
@@ -237,15 +335,15 @@ module AllProtectedVms = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
- {basic_vm with name_label= "vm1"}
+ {basic_vm with name_label= vm1}
; {
basic_vm with
ha_always_run= false
; ha_restart_priority= ""
- ; name_label= "vm2"
+ ; name_label= vm2
}
]
}
@@ -253,7 +351,7 @@ module AllProtectedVms = Generic.MakeStateful (struct
; ha_host_failures_to_tolerate= 0L
; cluster= 0
}
- , ["vm1"]
+ , [vm1]
)
]
end)
@@ -293,8 +391,8 @@ module PlanForNFailures = Generic.MakeStateful (struct
[
(* Two host pool with no VMs. *)
( {
- master= {memory_total= gib 256L; name_label= "master"; vms= []}
- ; slaves= [{memory_total= gib 256L; name_label= "slave"; vms= []}]
+ master= {memory_total= gib 256L; name_label= master; vms= []}
+ ; slaves= [{memory_total= gib 256L; name_label= slave; vms= []}]
; ha_host_failures_to_tolerate= 1L
; cluster= 0
}
@@ -306,10 +404,10 @@ module PlanForNFailures = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
- ; vms= [{basic_vm with memory= gib 120L; name_label= "vm1"}]
+ ; name_label= master
+ ; vms= [{basic_vm with memory= gib 120L; name_label= vm1}]
}
- ; slaves= [{memory_total= gib 256L; name_label= "slave"; vms= []}]
+ ; slaves= [{memory_total= gib 256L; name_label= slave; vms= []}]
; ha_host_failures_to_tolerate= 1L
; cluster= 0
}
@@ -320,14 +418,14 @@ module PlanForNFailures = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
- {basic_vm with memory= gib 120L; name_label= "vm1"}
- ; {basic_vm with memory= gib 120L; name_label= "vm2"}
+ {basic_vm with memory= gib 120L; name_label= vm1}
+ ; {basic_vm with memory= gib 120L; name_label= vm2}
]
}
- ; slaves= [{memory_total= gib 256L; name_label= "slave"; vms= []}]
+ ; slaves= [{memory_total= gib 256L; name_label= slave; vms= []}]
; ha_host_failures_to_tolerate= 1L
; cluster= 0
}
@@ -338,22 +436,22 @@ module PlanForNFailures = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
- {basic_vm with memory= gib 120L; name_label= "vm1"}
- ; {basic_vm with memory= gib 120L; name_label= "vm2"}
+ {basic_vm with memory= gib 120L; name_label= vm1}
+ ; {basic_vm with memory= gib 120L; name_label= vm2}
]
}
; slaves=
[
{
memory_total= gib 256L
- ; name_label= "slave"
+ ; name_label= slave
; vms=
[
- {basic_vm with memory= gib 120L; name_label= "vm3"}
- ; {basic_vm with memory= gib 120L; name_label= "vm4"}
+ {basic_vm with memory= gib 120L; name_label= vm3}
+ ; {basic_vm with memory= gib 120L; name_label= vm4}
]
}
]
@@ -422,10 +520,10 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
- ; vms= [{basic_vm with memory= gib 120L; name_label= "vm1"}]
+ ; name_label= master
+ ; vms= [{basic_vm with memory= gib 120L; name_label= vm1}]
}
- ; slaves= [{memory_total= gib 256L; name_label= "slave"; vms= []}]
+ ; slaves= [{memory_total= gib 256L; name_label= slave; vms= []}]
; ha_host_failures_to_tolerate= 1L
; cluster= 0
}
@@ -434,7 +532,7 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
ha_always_run= false
; ha_restart_priority= "restart"
; memory= gib 120L
- ; name_label= "vm2"
+ ; name_label= vm2
}
)
, Ok ()
@@ -445,14 +543,14 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
- {basic_vm with memory= gib 120L; name_label= "vm1"}
- ; {basic_vm with memory= gib 120L; name_label= "vm2"}
+ {basic_vm with memory= gib 120L; name_label= vm1}
+ ; {basic_vm with memory= gib 120L; name_label= vm2}
]
}
- ; slaves= [{memory_total= gib 256L; name_label= "slave"; vms= []}]
+ ; slaves= [{memory_total= gib 256L; name_label= slave; vms= []}]
; ha_host_failures_to_tolerate= 1L
; cluster= 0
}
@@ -461,7 +559,7 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
ha_always_run= false
; ha_restart_priority= "restart"
; memory= gib 120L
- ; name_label= "vm2"
+ ; name_label= vm2
}
)
, Error
@@ -475,19 +573,19 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
master=
{
memory_total= gib 256L
- ; name_label= "master"
+ ; name_label= master
; vms=
[
- {basic_vm with memory= gib 120L; name_label= "vm1"}
- ; {basic_vm with memory= gib 120L; name_label= "vm2"}
+ {basic_vm with memory= gib 120L; name_label= vm1}
+ ; {basic_vm with memory= gib 120L; name_label= vm2}
]
}
; slaves=
[
{
memory_total= gib 256L
- ; name_label= "slave"
- ; vms= [{basic_vm with memory= gib 120L; name_label= "vm1"}]
+ ; name_label= slave
+ ; vms= [{basic_vm with memory= gib 120L; name_label= vm1}]
}
]
; ha_host_failures_to_tolerate= 1L
@@ -498,7 +596,7 @@ module AssertNewVMPreservesHAPlan = Generic.MakeStateful (struct
ha_always_run= false
; ha_restart_priority= "restart"
; memory= gib 120L
- ; name_label= "vm2"
+ ; name_label= vm2
}
)
, Ok ()
@@ -533,11 +631,11 @@ module ComputeMaxFailures = Generic.MakeStateful (struct
[
(* Three host pool with no VMs. *)
( {
- master= {memory_total= gib 256L; name_label= "master"; vms= []}
+ master= {memory_total= gib 256L; name_label= master; vms= []}
; slaves=
[
- {memory_total= gib 256L; name_label= "slave1"; vms= []}
- ; {memory_total= gib 256L; name_label= "slave2"; vms= []}
+ {memory_total= gib 256L; name_label= slave1; vms= []}
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
]
; (* Placeholder value that is overridden when we call the compute function *)
ha_host_failures_to_tolerate= 3L
@@ -548,8 +646,8 @@ module ComputeMaxFailures = Generic.MakeStateful (struct
)
; (* Two hosts pool with no VMs *)
( {
- master= {memory_total= gib 256L; name_label= "master"; vms= []}
- ; slaves= [{memory_total= gib 256L; name_label= "slave1"; vms= []}]
+ master= {memory_total= gib 256L; name_label= master; vms= []}
+ ; slaves= [{memory_total= gib 256L; name_label= slave1; vms= []}]
; ha_host_failures_to_tolerate= 2L
; cluster= 2
}
@@ -558,8 +656,8 @@ module ComputeMaxFailures = Generic.MakeStateful (struct
)
; (* Two host pool with one down *)
( {
- master= {memory_total= gib 256L; name_label= "master"; vms= []}
- ; slaves= [{memory_total= gib 256L; name_label= "slave1"; vms= []}]
+ master= {memory_total= gib 256L; name_label= master; vms= []}
+ ; slaves= [{memory_total= gib 256L; name_label= slave1; vms= []}]
; ha_host_failures_to_tolerate= 2L
; cluster= 1
}
@@ -569,4 +667,730 @@ module ComputeMaxFailures = Generic.MakeStateful (struct
]
end)
-let tests = [("plan_for_n_failures", PlanForNFailures.tests)]
+let extract_output_for_anti_aff_plan ~__context plan =
+ plan
+ |> List.map (fun (vm, host) ->
+ ( Db.VM.get_name_label ~__context ~self:vm
+ , Db.Host.get_name_label ~__context ~self:host
+ )
+ )
+
+let anti_aff_grp1 = {name_label= grp1; placement= AntiAffinity}
+
+let anti_aff_plan_test_cases =
+ [
+ (* Test 0: No VMs in slave1 to be evacuated. *)
+ ( {
+ master= {memory_total= gib 256L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {memory_total= gib 256L; name_label= slave1; vms= []}
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ []
+ , (* Assert that no_breach_plan returns as expected *)
+ []
+ )
+ ; (* Test 1: No anti-affinity VMs in slave1 to be evacuated *)
+ ( {
+ master= {memory_total= gib 256L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {basic_vm with memory= gib 120L; name_label= vm1}
+ ; {basic_vm with memory= gib 120L; name_label= vm2}
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ []
+ , (* Assert that no_breach_plan returns as expected *)
+ []
+ )
+ ; (* Test 2: One anti-affinity VM in slave1 to be evacuated *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {basic_vm with memory= gib 120L; name_label= vm1}
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ [(vm1_grp1, slave2)]
+ , (* Assert that no_breach_plan returns as expected *)
+ [(vm1_grp1, slave2)]
+ )
+ ; (* Test 3: One anti-affinity VM in slave1 to be evacuated, the smallest host already has anti-affinity VM in the same group *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {basic_vm with memory= gib 120L; name_label= "vm2"}
+ ]
+ }
+ ; {
+ memory_total= gib 256L
+ ; name_label= slave2
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ [(vm1_grp1, master)]
+ , (* Assert that no_breach_plan returns as expected *)
+ [(vm1_grp1, master)]
+ )
+ ; (* Test 4: Two anti-affinity VMs belong to one group in slave1 to be evacuated *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 130L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ [(vm2_grp1, master); (vm1_grp1, slave2)]
+ , (* Assert that no_breach_plan returns as expected *)
+ [(vm2_grp1, master); (vm1_grp1, slave2)]
+ )
+  ; (* Test 5: Two anti-affinity VMs belong to one group in slave1 to be evacuated, only 1 can be planned *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 513L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ []
+ , (* Assert that no_breach_plan returns as expected *)
+ [(vm1_grp1, slave2)]
+ )
+ ; (* Test 6: 6 anti-affinity VMs belong to one group in slave1 to be evacuated, only 5 can be planned *)
+ ( {
+ master= {memory_total= gib 640L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 60L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 400L
+ ; name_label= vm6_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 250L
+ ; name_label= vm4_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 260L
+ ; name_label= vm5_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 130L
+ ; name_label= vm3_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ []
+ , (* Assert that no_breach_plan returns as expected *)
+ [(vm2_grp1, master); (vm1_grp1, slave2)]
+ )
+ ; (* Test 7: Two groups anti-affinity VMs in slave1 to be evacuated *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm6_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 60L
+ ; name_label= vm5_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 130L
+ ; name_label= vm7_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 1L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 2L
+ ; name_label= vm2_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 3L
+ ; name_label= vm3_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 4L
+ ; name_label= vm4_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ]
+ }
+ ; {memory_total= gib 256L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ [
+ (vm7_grp2, master)
+ ; (vm6_grp1, slave2)
+ ; (vm5_grp2, slave2)
+ ; (vm4_grp2, master)
+ ; (vm3_grp1, master)
+ ; (vm2_grp2, slave2)
+ ; (vm1_grp1, slave2)
+ ]
+ , (* Assert that no_breach_plan returns as expected *)
+ [
+ (vm4_grp2, master)
+ ; (vm3_grp1, master)
+ ; (vm2_grp2, slave2)
+ ; (vm1_grp1, slave2)
+ ]
+ )
+ ; (* Test 8: Two groups anti-affinity VMs in slave1 to be evacuated, master is bigger than slave2 in size when started, but becomes smaller during planning *)
+ ( {
+ master= {memory_total= gib 512L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 120L
+ ; name_label= vm6_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 60L
+ ; name_label= vm5_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 130L
+ ; name_label= vm7_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 1L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 6L
+ ; name_label= vm3_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ; {
+ basic_vm with
+ memory= gib 5L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 7L
+ ; name_label= vm4_grp2
+ ; groups= [{name_label= grp2; placement= AntiAffinity}]
+ }
+ ]
+ }
+ ; {memory_total= gib 510L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan returns as expected *)
+ [
+ (vm7_grp2, slave2)
+ ; (vm6_grp1, master)
+ ; (vm5_grp2, master)
+ ; (vm4_grp2, slave2)
+ ; (vm3_grp2, master)
+ ; (vm2_grp1, master)
+ ; (vm1_grp1, slave2)
+ ]
+ , (* Assert that no_breach_plan returns as expected *)
+ [
+ (vm4_grp2, slave2)
+ ; (vm3_grp2, master)
+ ; (vm2_grp1, master)
+ ; (vm1_grp1, slave2)
+ ]
+ )
+ ]
+
+module Slave1EvacuationVMAntiAffinitySpreadEvenlyPlan =
+Generic.MakeStateful (struct
+ module Io = struct
+ type input_t = pool
+
+ type output_t = (string * string) list
+
+ let string_of_input_t = string_of_pool
+
+ let string_of_output_t = Test_printers.(list (pair string string))
+ end
+
+ module State = Test_state.XapiDb
+
+ let load_input __context = setup ~__context
+
+ let extract_output __context _ =
+ let slv1 =
+ Db.Host.get_all ~__context
+ |> List.find (fun self -> Db.Host.get_name_label ~__context ~self = slave1)
+ in
+ let slave1_anti_aff_vms =
+ Db.Host.get_resident_VMs ~__context ~self:slv1
+ |> List.map (fun self -> (self, Db.VM.get_record ~__context ~self))
+ |> List.filter (fun (_, record) -> not record.API.vM_is_control_domain)
+ |> List.map (fun (self, record) ->
+ (self, Xapi_ha_vm_failover.vm_memory ~__context record)
+ )
+ |> Xapi_ha_vm_failover.vms_partition ~__context
+ |> fst
+ in
+ let hosts =
+ Db.Host.get_all ~__context
+ |> List.filter (( <> ) slv1)
+ |> List.map (fun host ->
+ (host, Xapi_ha_vm_failover.host_free_memory ~__context ~host)
+ )
+ in
+ let pool_state =
+ Xapi_ha_vm_failover.init_spread_evenly_plan_pool_state ~__context
+ slave1_anti_aff_vms hosts
+ in
+ extract_output_for_anti_aff_plan ~__context
+ (Xapi_ha_vm_failover.compute_spread_evenly_plan ~__context pool_state
+ (slave1_anti_aff_vms
+ |> List.sort (fun (_, a, _) (_, b, _) -> compare a b)
+ )
+ )
+
+ let tests =
+ `QuickAndAutoDocumented
+ (anti_aff_plan_test_cases
+ |> List.map (fun (pool, spread_evenly_plan, _no_breach_plan) ->
+ (pool, spread_evenly_plan)
+ )
+ )
+end)
+
+module Slave1EvacuationVMAntiAffinityNoBreachPlan = Generic.MakeStateful (struct
+ module Io = struct
+ type input_t = pool
+
+ type output_t = (string * string) list
+
+ let string_of_input_t = string_of_pool
+
+ let string_of_output_t = Test_printers.(list (pair string string))
+ end
+
+ module State = Test_state.XapiDb
+
+ let load_input __context = setup ~__context
+
+ let extract_output __context _ =
+ let slv1 =
+ Db.Host.get_all ~__context
+ |> List.find (fun self -> Db.Host.get_name_label ~__context ~self = slave1)
+ in
+ let slave1_anti_aff_vms =
+ Db.Host.get_resident_VMs ~__context ~self:slv1
+ |> List.map (fun self -> (self, Db.VM.get_record ~__context ~self))
+ |> List.filter (fun (_, record) -> not record.API.vM_is_control_domain)
+ |> List.map (fun (self, record) ->
+ (self, Xapi_ha_vm_failover.vm_memory ~__context record)
+ )
+ |> Xapi_ha_vm_failover.vms_partition ~__context
+ |> fst
+ in
+ let hosts =
+ Db.Host.get_all ~__context
+ |> List.filter (( <> ) slv1)
+ |> List.map (fun host ->
+ (host, Xapi_ha_vm_failover.host_free_memory ~__context ~host)
+ )
+ in
+ let pool_state =
+ Xapi_ha_vm_failover.init_spread_evenly_plan_pool_state ~__context
+ slave1_anti_aff_vms hosts
+ |> Xapi_ha_vm_failover.init_no_breach_plan_pool_state
+ in
+ extract_output_for_anti_aff_plan ~__context
+ (Xapi_ha_vm_failover.compute_no_breach_plan ~__context pool_state
+ (slave1_anti_aff_vms
+ |> List.sort (fun (_, a, _) (_, b, _) -> compare a b)
+ )
+ |> fst
+ )
+
+ let tests =
+ `QuickAndAutoDocumented
+ (anti_aff_plan_test_cases
+ |> List.map (fun (pool, _spread_evenly_plan, no_breach_plan) ->
+ (pool, no_breach_plan)
+ )
+ )
+end)
+
+module Slave1EvacuationPlan = Generic.MakeStateful (struct
+ module Io = struct
+ type input_t = pool
+
+ type output_t = (string * string) list
+
+ let string_of_input_t = string_of_pool
+
+ let string_of_output_t = Test_printers.(list (pair string string))
+ end
+
+ module State = Test_state.XapiDb
+
+ let load_input __context = setup ~__context
+
+ let extract_output __context _ =
+ let all_hosts = Db.Host.get_all ~__context in
+ let slv1 =
+ Db.Host.get_all ~__context
+ |> List.find (fun self -> Db.Host.get_name_label ~__context ~self = slave1)
+ in
+ let slave1_vms =
+ Db.Host.get_resident_VMs ~__context ~self:slv1
+ |> List.map (fun self -> (self, Db.VM.get_record ~__context ~self))
+ |> List.filter (fun (_, record) -> not record.API.vM_is_control_domain)
+ |> List.map (fun (self, record) ->
+ (self, Xapi_ha_vm_failover.vm_memory ~__context record)
+ )
+ in
+ let hosts =
+ all_hosts
+ |> List.filter (( <> ) slv1)
+ |> List.map (fun host ->
+ (host, Xapi_ha_vm_failover.host_free_memory ~__context ~host)
+ )
+ in
+ Xapi_ha_vm_failover.compute_anti_aff_evac_plan ~__context
+ (List.length all_hosts) hosts slave1_vms
+ |> List.map (fun (vm, host) ->
+ ( Db.VM.get_name_label ~__context ~self:vm
+ , Db.Host.get_name_label ~__context ~self:host
+ )
+ )
+
+ let tests =
+ `QuickAndAutoDocumented
+ [
+ (* Test 0: Spread evenly plan is taken. *)
+ ( {
+ master= {memory_total= gib 200L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {
+ basic_vm with
+ memory= gib 24L
+ ; name_label= vm4_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 23L
+ ; name_label= vm3_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 22L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 21L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 60L; name_label= slave2; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that spread_evenly_plan is taken. *)
+ [
+ (vm4_grp1, master)
+ ; (vm3_grp1, slave2)
+ ; (vm2_grp1, master)
+ ; (vm1_grp1, slave2)
+ ]
+ )
+ (* Test 1: No breach plan is taken. *)
+ ; ( {
+ master= {memory_total= gib 100L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {basic_vm with memory= gib 85L; name_label= vm1}
+ ; {basic_vm with memory= gib 65L; name_label= vm2}
+ ; {
+ basic_vm with
+ memory= gib 30L
+ ; name_label= vm3_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 20L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 10L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 90L; name_label= slave2; vms= []}
+ ; {memory_total= gib 70L; name_label= slave3; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that no-breach-plan is taken *)
+ [
+ (vm2_grp1, slave2)
+ ; (vm1_grp1, slave3)
+ ; (vm3_grp1, slave3)
+ ; (vm2, slave2)
+ ; (vm1, master)
+ ]
+ )
+ (* Test 2: Fallback to binpack plan. *)
+ ; ( {
+ master= {memory_total= gib 100L; name_label= master; vms= []}
+ ; slaves=
+ [
+ {
+ memory_total= gib 256L
+ ; name_label= slave1
+ ; vms=
+ [
+ {basic_vm with memory= gib 95L; name_label= vm1}
+ ; {basic_vm with memory= gib 75L; name_label= vm2}
+ ; {
+ basic_vm with
+ memory= gib 30L
+ ; name_label= vm3_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 20L
+ ; name_label= vm2_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ; {
+ basic_vm with
+ memory= gib 10L
+ ; name_label= vm1_grp1
+ ; groups= [anti_aff_grp1]
+ }
+ ]
+ }
+ ; {memory_total= gib 80L; name_label= slave2; vms= []}
+ ; {memory_total= gib 70L; name_label= slave3; vms= []}
+ ]
+ ; ha_host_failures_to_tolerate= 0L
+ ; cluster= 0
+ }
+ , (* Assert that binpack-plan is taken *)
+ [
+ (vm1_grp1, slave3)
+ ; (vm2_grp1, slave3)
+ ; (vm3_grp1, slave3)
+ ; (vm2, slave2)
+ ; (vm1, master)
+ ]
+ )
+ ]
+end)
+
+let tests =
+ [
+ ("plan_for_n_failures", PlanForNFailures.tests)
+ ; ( "anti-affinity spread evenly plan"
+ , Slave1EvacuationVMAntiAffinitySpreadEvenlyPlan.tests
+ )
+ ; ( "anti-affinity no breach plan"
+ , Slave1EvacuationVMAntiAffinityNoBreachPlan.tests
+ )
+ ; ( "3 phases planning: spread evenly plan, no breach plan, binpacking plan"
+ , Slave1EvacuationPlan.tests
+ )
+ ]
diff --git a/ocaml/tests/test_vm_group.ml b/ocaml/tests/test_vm_group.ml
new file mode 100644
index 00000000000..910711f9646
--- /dev/null
+++ b/ocaml/tests/test_vm_group.ml
@@ -0,0 +1,55 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module T = Test_common
+
+let test_associate_vm_with_vm_group () =
+ let __context = T.make_test_database () in
+ let rpc, session_id = Test_common.make_client_params ~__context in
+ let vm1 = T.make_vm ~__context () in
+ let vm2 = T.make_vm ~__context () in
+ let vm3 = T.make_vm ~__context () in
+ let vm_group = T.make_vm_group ~__context ~placement:`anti_affinity () in
+ Client.Client.VM.set_groups ~rpc ~session_id ~self:vm1 ~value:[vm_group] ;
+ Client.Client.VM.set_groups ~rpc ~session_id ~self:vm2 ~value:[vm_group] ;
+ Client.Client.VM.set_groups ~rpc ~session_id ~self:vm3 ~value:[vm_group] ;
+ let vms = Db.VM_group.get_VMs ~__context ~self:vm_group in
+ let extract_vm_strings vms =
+ List.sort String.compare (List.map Ref.string_of vms)
+ in
+ Alcotest.(check (slist string String.compare))
+ "check VMs are in the group" (extract_vm_strings vms)
+ (extract_vm_strings [vm1; vm2; vm3])
+
+let test_vm_can_only_belong_to_one_group () =
+ let __context = T.make_test_database () in
+ let rpc, session_id = Test_common.make_client_params ~__context in
+ let vm = T.make_vm ~__context () in
+ let vm_group1 = T.make_vm_group ~__context ~placement:`anti_affinity () in
+ let vm_group2 = T.make_vm_group ~__context ~placement:`anti_affinity () in
+ Alcotest.check_raises "should fail"
+ (Api_errors.Server_error (Api_errors.too_many_groups, []))
+ (fun () ->
+ Client.Client.VM.set_groups ~rpc ~session_id ~self:vm
+ ~value:[vm_group1; vm_group2]
+ )
+
+let test =
+ [
+ ("test_associate_vm_with_vm_group", `Quick, test_associate_vm_with_vm_group)
+ ; ( "test_vm_can_only_belong_to_one_group"
+ , `Quick
+ , test_vm_can_only_belong_to_one_group
+ )
+ ]
diff --git a/ocaml/tests/test_vm_helpers.ml b/ocaml/tests/test_vm_helpers.ml
index bdd8dc061d8..f62c5145971 100644
--- a/ocaml/tests/test_vm_helpers.ml
+++ b/ocaml/tests/test_vm_helpers.ml
@@ -159,7 +159,7 @@ let rec assert_equivalent expected_grouping actual_grouping =
assert_host_groups_equal e g ;
assert_equivalent es gs
-let assert_host_groups_equal_for_vgpu g g' =
+let assert_host_groups_equal g g' =
match g' with
| [] ->
()
@@ -170,7 +170,7 @@ let assert_host_groups_equal_for_vgpu g g' =
Alcotest.(check (slist string String.compare))
"check host strings" (extract_host_strings g) (extract_host_strings g')
-let rec assert_equivalent_for_vgpu expected_grouping actual_grouping =
+let rec assert_equivalent_for_grouping expected_grouping actual_grouping =
match (expected_grouping, actual_grouping) with
| [], [] ->
()
@@ -181,13 +181,13 @@ let rec assert_equivalent_for_vgpu expected_grouping actual_grouping =
Alcotest.fail
(Printf.sprintf "%d fewer groups than expected." (List.length xx))
| e :: es, g :: gs ->
- assert_host_groups_equal_for_vgpu e g ;
- assert_equivalent_for_vgpu es gs
+ assert_host_groups_equal e g ;
+ assert_equivalent_for_grouping es gs
let assert_grouping ~__context gpu_group ~visible_hosts vgpu_type g =
let vgpu = VGPU_T.make_vgpu ~__context ~gPU_group:gpu_group vgpu_type in
let host_lists = rank_hosts_by_best_vgpu ~__context vgpu visible_hosts in
- assert_equivalent_for_vgpu g host_lists
+ assert_equivalent_for_grouping g host_lists
let check_expectations ~__context gpu_group visible_hosts vgpu_type
expected_grouping =
@@ -524,6 +524,44 @@ let test_group_hosts_netsriov_with_allocated () =
"Test-failure: Unexpected number of sriov network in test"
)
+let on_pool_of_anti_affinity placement
+ (f : Context.t -> API.ref_host -> API.ref_host -> API.ref_host -> 'a) =
+ let __context = T.make_test_database () in
+ let h1 =
+ match Db.Host.get_all ~__context with
+ | h :: _ ->
+ h
+ | [] ->
+ T.make_host ~__context ()
+ in
+ (* Make two more hosts *)
+ let h2 = T.make_host ~__context () in
+ let h3 = T.make_host ~__context () in
+ let g = T.make_vm_group ~__context ~placement () in
+ f __context h1 h2 h3 g
+
+let test_get_group_key_anti_affinity () =
+ on_pool_of_anti_affinity `anti_affinity (fun __context _ _ _ g ->
+ let vm = T.make_vm ~__context () in
+ Db.VM.set_groups ~__context ~self:vm ~value:[g] ;
+ match Xapi_vm_helpers.get_group_key ~__context ~vm with
+ | `AntiAffinity _ ->
+ ()
+ | _ ->
+ Alcotest.fail "Test-failure: Unexpected Group Key in test"
+ )
+
+let test_get_group_key_normal_group () =
+ on_pool_of_anti_affinity `normal (fun __context _ _ _ g ->
+ let vm = T.make_vm ~__context () in
+ Db.VM.set_groups ~__context ~self:vm ~value:[g] ;
+ match Xapi_vm_helpers.get_group_key ~__context ~vm with
+ | `Other ->
+ ()
+ | _ ->
+ Alcotest.fail "Test-failure: Unexpected Group Key in test"
+ )
+
let test_get_group_key_vgpu () =
on_pool_of_intel_i350 (fun __context _ _ _ ->
let group = List.hd (Db.GPU_group.get_all ~__context) in
@@ -573,6 +611,461 @@ let test_get_group_key_vgpu_and_netsriov () =
Alcotest.fail "Test-failure: Unexpected Group Key in test"
)
+let test_get_group_key_anti_affinity_and_vgpu_and_netsriov () =
+ on_pool_of_intel_i350 (fun __context _ _ _ ->
+ let group =
+ match Db.GPU_group.get_all ~__context with
+ | g :: _ ->
+ g
+ | [] ->
+ Alcotest.fail "Can not find any GPU_group"
+ in
+ let vm = make_vm_with_vgpu_in_group ~__context VGPU_T.k100 group in
+ let sriov_network =
+ List.find
+ (fun network ->
+ Xapi_network_sriov_helpers.is_sriov_network ~__context ~self:network
+ )
+ (Db.Network.get_all ~__context)
+ in
+ let (_ : API.ref_VIF) =
+ T.make_vif ~__context ~vM:vm ~network:sriov_network ()
+ in
+ let anti_affinity_group =
+ T.make_vm_group ~__context ~placement:`anti_affinity ()
+ in
+ Db.VM.set_groups ~__context ~self:vm ~value:[anti_affinity_group] ;
+ match Xapi_vm_helpers.get_group_key ~__context ~vm with
+ | `AntiAffinity _ ->
+ ()
+ | _ ->
+ Alcotest.fail "Test-failure: Unexpected Group Key in test"
+ )
+
+module VMAntiAffinityRankedGrpTest = struct
+ type vm_state = Running | Starting | Migrating | Suspended | Paused | Halted
+
+ type vm_info = {
+ name: string option
+ ; host: string option
+ ; group: string option
+ ; state: vm_state
+ }
+
+ type test_case = {
+ description: string
+ ; vm_to_start: vm_info
+ ; other_vms: vm_info list
+ ; hosts: string option list
+ ; affinity_host: string option
+ ; expected: string option list list
+ }
+
+ let vm_to_start = Some "vm"
+
+ let vm1 = Some "vm1"
+
+ let vm2 = Some "vm2"
+
+ let vm3 = Some "vm3"
+
+ let vm4 = Some "vm4"
+
+ let vm5 = Some "vm5"
+
+ let vm6 = Some "vm6"
+
+ let h1 = Some "h1"
+
+ let h2 = Some "h2"
+
+ let h3 = Some "h3"
+
+ let anti_affinity = Some "anti-affinity"
+
+ let other_group = Some "other-group"
+
+ let test_cases =
+ [
+ {
+ description= "No other VM"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms= []
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "VMs not in group"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= None; state= Running}
+ ; {name= vm2; host= h3; group= None; state= Running}
+ ; {name= vm3; host= h3; group= None; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "VMs in other group"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= other_group; state= Running}
+ ; {name= vm2; host= h3; group= other_group; state= Running}
+ ; {name= vm3; host= h3; group= other_group; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 running VMs (h1(0) h2(1) h3(2))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1]; [h2]; [h3]]
+ }
+ ; {
+ description= "3 running VMs (h1(1) h2(1) h3(1))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h1; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 running VMs (h1(0) h2(0) h3(3))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2]; [h3]]
+ }
+ ; {
+ description= "3 starting VMs (h1(0) h2(1) h3(2))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= anti_affinity; state= Starting}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Starting}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Starting}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1]; [h2]; [h3]]
+ }
+ ; {
+ description= "3 starting VMs (h1(1) h2(1) h3(1))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h1; group= anti_affinity; state= Starting}
+ ; {name= vm2; host= h2; group= anti_affinity; state= Starting}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Starting}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 starting VMs (h1(0) h2(0) h3(3))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h3; group= anti_affinity; state= Starting}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Starting}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Starting}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2]; [h3]]
+ }
+ ; {
+ description= "3 migrating VMs (h1(0) h2(1) h3(2))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= anti_affinity; state= Migrating}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Migrating}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Migrating}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1]; [h2]; [h3]]
+ }
+ ; {
+ description= "3 migrating VMs (h1(0) h2(0) h3(3))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h3; group= anti_affinity; state= Migrating}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Migrating}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Migrating}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2]; [h3]]
+ }
+ ; {
+ description= "3 stopped VMs"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= None; group= anti_affinity; state= Halted}
+ ; {name= vm2; host= None; group= anti_affinity; state= Halted}
+ ; {name= vm3; host= None; group= anti_affinity; state= Halted}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 suspended VMs"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= None; group= anti_affinity; state= Suspended}
+ ; {name= vm2; host= None; group= anti_affinity; state= Suspended}
+ ; {name= vm3; host= None; group= anti_affinity; state= Suspended}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 paused VMs (h1(0) h2(1) h3(2))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h2; group= anti_affinity; state= Paused}
+ ; {name= vm2; host= h3; group= anti_affinity; state= Paused}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Paused}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ; {
+ description= "3 running VMs with affinity-host"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h1; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= h1
+ ; expected= [[h1]; [h2; h3]]
+ }
+ ; {
+ description= "6 running VMs (h1(1) h2(2) h3(3))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h1; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm4; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm5; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm6; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1]; [h2]; [h3]]
+ }
+ ; {
+ description= "6 running VMs (h1(2) h2(2) h3(2))"
+ ; vm_to_start=
+ {name= vm_to_start; host= None; group= anti_affinity; state= Halted}
+ ; other_vms=
+ [
+ {name= vm1; host= h1; group= anti_affinity; state= Running}
+ ; {name= vm2; host= h1; group= anti_affinity; state= Running}
+ ; {name= vm3; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm4; host= h2; group= anti_affinity; state= Running}
+ ; {name= vm5; host= h3; group= anti_affinity; state= Running}
+ ; {name= vm6; host= h3; group= anti_affinity; state= Running}
+ ]
+ ; hosts= [h1; h2; h3]
+ ; affinity_host= None
+ ; expected= [[h1; h2; h3]]
+ }
+ ]
+
+ let make_hosts ~__context ~hosts =
+ match hosts with
+ | fst :: others ->
+ let host1 =
+ match Db.Host.get_all ~__context with
+ | h :: _ ->
+ h
+ | _ ->
+ T.make_host ~__context ()
+ in
+ Db.Host.set_name_label ~__context ~self:host1 ~value:(Option.get fst) ;
+ List.iter
+ (fun h ->
+ let _ = T.make_host ~__context ~name_label:(Option.get h) () in
+ ()
+ )
+ others
+ | [] ->
+ ()
+
+ let make_vm_based_on_vm_info ~__context ~vm_info =
+ let vm =
+ T.make_vm ~__context
+ ~name_label:(Option.value vm_info.name ~default:(Option.get vm_to_start))
+ ()
+ in
+ ( match vm_info.group with
+ | None ->
+ ()
+ | Some group_name ->
+ let group =
+ match Db.VM_group.get_by_name_label ~__context ~label:group_name with
+ | g :: _ ->
+ g
+ | [] ->
+ T.make_vm_group ~__context ~placement:`anti_affinity
+ ~name_label:group_name ()
+ in
+ Db.VM.set_groups ~__context ~self:vm ~value:[group]
+ ) ;
+ ( match vm_info.host with
+ | None ->
+ ()
+ | Some host_name -> (
+ let host =
+ match Db.Host.get_by_name_label ~__context ~label:host_name with
+ | h :: _ ->
+ h
+ | [] ->
+ Alcotest.fail "Can not find any host by name_label"
+ in
+ match vm_info.state with
+ | Running ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Running ;
+ Db.VM.set_resident_on ~__context ~self:vm ~value:host
+ | Starting ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Halted ;
+ Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm
+ ~value:host
+ | Migrating ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Running ;
+ Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm
+ ~value:host ;
+ let other_hosts =
+ Db.Host.get_all ~__context
+ |> List.filter (fun h ->
+ Db.Host.get_name_label ~__context ~self:h <> host_name
+ )
+ in
+ let other = match other_hosts with h :: _ -> h | [] -> Ref.null in
+ Db.VM.set_resident_on ~__context ~self:vm ~value:other
+ | Suspended ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Suspended
+ | Paused ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Paused ;
+ Db.VM.set_resident_on ~__context ~self:vm ~value:host
+ | Halted ->
+ Db.VM.set_power_state ~__context ~self:vm ~value:`Halted
+ )
+ ) ;
+ vm
+
+ let check_anti_affinity_grouping ~__context ~vm ~group expected_grouping =
+ let host_lists = rank_hosts_by_placement ~__context ~vm ~group in
+ assert_equivalent_for_grouping expected_grouping host_lists
+
+ let test {vm_to_start; other_vms; hosts; affinity_host; expected; _} () =
+ let __context = T.make_test_database () in
+ make_hosts ~__context ~hosts ;
+ let vm = make_vm_based_on_vm_info ~__context ~vm_info:vm_to_start in
+ let _ =
+ List.map
+ (fun vm -> make_vm_based_on_vm_info ~__context ~vm_info:vm)
+ other_vms
+ in
+ Db.VM.set_affinity ~__context ~self:vm
+ ~value:
+ ( match affinity_host with
+ | None ->
+ Ref.null
+ | Some host_name -> (
+ match Db.Host.get_by_name_label ~__context ~label:host_name with
+ | h :: _ ->
+ h
+ | [] ->
+ Alcotest.fail "Can not find any host by name_label"
+ )
+ ) ;
+ let group =
+ match Db.VM.get_groups ~__context ~self:vm with
+ | g :: _ ->
+ g
+ | [] ->
+ Alcotest.fail "The VM is not associated with any group"
+ in
+ check_anti_affinity_grouping ~__context ~vm ~group
+ (List.map
+ (fun list ->
+ List.map
+ (fun h ->
+ match
+ Db.Host.get_by_name_label ~__context ~label:(Option.get h)
+ with
+ | h :: _ ->
+ h
+ | [] ->
+ Alcotest.fail "Can not find any host by name_label"
+ )
+ list
+ )
+ expected
+ )
+
+ let generate_tests case = (case.description, `Quick, test case)
+
+ let tests = List.map generate_tests test_cases
+end
+
let test =
[
("test_gpus_available_succeeds", `Quick, test_gpus_available_succeeds)
@@ -612,14 +1105,24 @@ let test =
, `Quick
, test_group_hosts_netsriov_with_allocated
)
+ ; ( "test_get_group_key_anti_affinity"
+ , `Quick
+ , test_get_group_key_anti_affinity
+ )
+ ; ("test_get_group_key_normal_group", `Quick, test_get_group_key_normal_group)
; ("test_get_group_key_vgpu", `Quick, test_get_group_key_vgpu)
; ("test_get_group_key_netsriov", `Quick, test_get_group_key_netsriov)
; ( "test_get_group_key_vgpu_and_netsriov"
, `Quick
, test_get_group_key_vgpu_and_netsriov
)
+ ; ( "test_get_group_key_anti_affinity_and_vgpu_and_netsriov"
+ , `Quick
+ , test_get_group_key_anti_affinity_and_vgpu_and_netsriov
+ )
]
let () =
Suite_init.harness_init () ;
- Alcotest.run "Test VM Helpers suite" [("Test_vm_helpers", test)]
+ Alcotest.run "Test VM Helpers suite"
+ [("Test_vm_helpers", test @ VMAntiAffinityRankedGrpTest.tests)]
diff --git a/ocaml/xapi-cli-server/cli_frontend.ml b/ocaml/xapi-cli-server/cli_frontend.ml
index 13c695da5db..3c2c617fddf 100644
--- a/ocaml/xapi-cli-server/cli_frontend.ml
+++ b/ocaml/xapi-cli-server/cli_frontend.ml
@@ -2686,6 +2686,24 @@ let rec cmdtable_data : (string * cmd_spec) list =
; flags= []
}
)
+ ; ( "vm-group-create"
+ , {
+ reqd= ["name-label"; "placement"]
+ ; optn= ["name-description"]
+ ; help= "Create a VM group."
+ ; implementation= No_fd Cli_operations.VM_group.create
+ ; flags= []
+ }
+ )
+ ; ( "vm-group-destroy"
+ , {
+ reqd= ["uuid"]
+ ; optn= []
+ ; help= "Destroy a VM group."
+ ; implementation= No_fd Cli_operations.VM_group.destroy
+ ; flags= []
+ }
+ )
; ( "diagnostic-vm-status"
, {
reqd= ["uuid"]
diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml
index 54eace11b69..6aee526f497 100644
--- a/ocaml/xapi-cli-server/cli_operations.ml
+++ b/ocaml/xapi-cli-server/cli_operations.ml
@@ -1142,6 +1142,11 @@ let gen_cmds rpc session_id =
mk get_all_records_where get_by_uuid vm_appliance_record "appliance" []
[] rpc session_id
)
+ ; Client.VM_group.(
+ mk get_all_records_where get_by_uuid vm_group_record "vm-group" []
+ ["uuid"; "name-label"; "name-description"; "placement"; "vm-uuids"]
+ rpc session_id
+ )
; Client.PGPU.(
mk get_all_records_where get_by_uuid pgpu_record "pgpu" []
["uuid"; "pci-uuid"; "vendor-name"; "device-name"; "gpu-group-uuid"]
@@ -8000,3 +8005,27 @@ module Observer = struct
let self = Client.Observer.get_by_uuid ~rpc ~session_id ~uuid in
Client.Observer.destroy ~rpc ~session_id ~self
end
+
+module VM_group = struct
+ let create printer rpc session_id params =
+ let name_label = List.assoc "name-label" params in
+ let name_description =
+ List.assoc_opt "name-description" params |> Option.value ~default:""
+ in
+ let placement =
+ Record_util.vm_placement_policy_of_string (List.assoc "placement" params)
+ in
+ let ref =
+ Client.VM_group.create ~rpc ~session_id ~name_label ~name_description
+ ~placement
+ in
+ let uuid = Client.VM_group.get_uuid ~rpc ~session_id ~self:ref in
+ printer (Cli_printer.PList [uuid])
+
+ let destroy _printer rpc session_id params =
+ let ref =
+ Client.VM_group.get_by_uuid ~rpc ~session_id
+ ~uuid:(List.assoc "uuid" params)
+ in
+ Client.VM_group.destroy ~rpc ~session_id ~self:ref
+end
diff --git a/ocaml/xapi-cli-server/record_util.ml b/ocaml/xapi-cli-server/record_util.ml
index 6ee45dce5dd..92322264e36 100644
--- a/ocaml/xapi-cli-server/record_util.ml
+++ b/ocaml/xapi-cli-server/record_util.ml
@@ -1226,3 +1226,18 @@ let update_sync_frequency_of_string s =
`weekly
| _ ->
raise (Record_failure ("Expected 'daily', 'weekly', got " ^ s))
+
+let vm_placement_policy_to_string = function
+ | `normal ->
+ "normal"
+ | `anti_affinity ->
+ "anti-affinity"
+
+let vm_placement_policy_of_string a =
+ match String.lowercase_ascii a with
+ | "normal" ->
+ `normal
+ | "anti-affinity" ->
+ `anti_affinity
+ | s ->
+ raise (Record_failure ("Invalid VM placement policy, got " ^ s))
diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml
index 92c27c6917c..abcd5f3fb1c 100644
--- a/ocaml/xapi-cli-server/records.ml
+++ b/ocaml/xapi-cli-server/records.ml
@@ -1495,6 +1495,12 @@ let pool_record rpc session_id pool =
; make_field ~name:"update-sync-enabled"
~get:(fun () -> (x ()).API.pool_update_sync_enabled |> string_of_bool)
()
+ ; make_field ~name:"recommendations"
+ ~get:(fun () ->
+ Record_util.s2sm_to_string "; " (x ()).API.pool_recommendations
+ )
+ ~get_map:(fun () -> (x ()).API.pool_recommendations)
+ ()
]
}
@@ -2506,6 +2512,21 @@ let vm_record rpc session_id vm =
~value:(Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid:x)
)
()
+ ; make_field ~name:"groups"
+ ~get:(fun () -> get_uuids_from_refs (x ()).API.vM_groups)
+ ~set:(fun x ->
+ if x = "" then
+ Client.VM.set_groups ~rpc ~session_id ~self:vm ~value:[]
+ else
+ let value =
+ get_words ',' x
+ |> List.map (fun uuid ->
+ Client.VM_group.get_by_uuid ~rpc ~session_id ~uuid
+ )
+ in
+ Client.VM.set_groups ~rpc ~session_id ~self:vm ~value
+ )
+ ()
; make_field ~name:"snapshot-schedule"
~get:(fun () -> get_uuid_from_ref (x ()).API.vM_snapshot_schedule)
~set:(fun x ->
@@ -4072,6 +4093,55 @@ let vm_appliance_record rpc session_id vm_appliance =
]
}
+let vm_group_record rpc session_id vm_group =
+ let _ref = ref vm_group in
+ let empty_record =
+ ToGet (fun () -> Client.VM_group.get_record ~rpc ~session_id ~self:!_ref)
+ in
+ let record = ref empty_record in
+ let x () = lzy_get record in
+ {
+ setref=
+ (fun r ->
+ _ref := r ;
+ record := empty_record
+ )
+ ; setrefrec=
+ (fun (a, b) ->
+ _ref := a ;
+ record := Got b
+ )
+ ; record= x
+ ; getref= (fun () -> !_ref)
+ ; fields=
+ [
+ make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vM_group_uuid) ()
+ ; make_field ~name:"name-label"
+ ~get:(fun () -> (x ()).API.vM_group_name_label)
+ ~set:(fun value ->
+ Client.VM_group.set_name_label ~rpc ~session_id ~self:!_ref ~value
+ )
+ ()
+ ; make_field ~name:"name-description"
+ ~get:(fun () -> (x ()).API.vM_group_name_description)
+ ~set:(fun value ->
+ Client.VM_group.set_name_description ~rpc ~session_id ~self:!_ref
+ ~value
+ )
+ ()
+ ; make_field ~name:"placement"
+ ~get:(fun () ->
+ Record_util.vm_placement_policy_to_string
+ (x ()).API.vM_group_placement
+ )
+ ()
+ ; make_field ~name:"vm-uuids"
+ ~get:(fun () -> get_uuids_from_refs (x ()).API.vM_group_VMs)
+ ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vM_group_VMs)
+ ()
+ ]
+ }
+
let dr_task_record rpc session_id dr_task =
let _ref = ref dr_task in
let empty_record =
@@ -5237,6 +5307,12 @@ let certificate_record rpc session_id certificate =
; make_field ~name:"fingerprint"
~get:(fun () -> (x ()).API.certificate_fingerprint)
()
+ ; make_field ~name:"fingerprint_sha256"
+ ~get:(fun () -> (x ()).API.certificate_fingerprint_sha256)
+ ()
+ ; make_field ~name:"fingerprint_sha1"
+ ~get:(fun () -> (x ()).API.certificate_fingerprint_sha1)
+ ()
]
}
diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml
index 5616ba3a1c5..3998068378a 100644
--- a/ocaml/xapi-consts/api_errors.ml
+++ b/ocaml/xapi-consts/api_errors.ml
@@ -1375,3 +1375,5 @@ let telemetry_next_collection_too_late =
(* FIPS/CC_PREPARATIONS *)
let illegal_in_fips_mode = add_error "ILLEGAL_IN_FIPS_MODE"
+
+let too_many_groups = "TOO_MANY_GROUPS"
diff --git a/ocaml/xapi-consts/api_messages.ml b/ocaml/xapi-consts/api_messages.ml
index bb63facfe2a..5d9160152c2 100644
--- a/ocaml/xapi-consts/api_messages.ml
+++ b/ocaml/xapi-consts/api_messages.ml
@@ -365,3 +365,6 @@ let periodic_update_sync_failed = addMessage "PERIODIC_UPDATE_SYNC_FAILED" 3L
let xapi_startup_blocked_as_version_higher_than_coordinator =
addMessage "XAPI_STARTUP_BLOCKED_AS_VERSION_HIGHER_THAN_COORDINATOR" 2L
+
+let all_running_vms_in_anti_affinity_grp_on_single_host =
+ addMessage "ALL_RUNNING_VMS_IN_ANTI_AFFINITY_GRP_ON_SINGLE_HOST" 3L
diff --git a/ocaml/xapi-guard/lib/disk_cache.ml b/ocaml/xapi-guard/lib/disk_cache.ml
index 5e8b9bb0650..9674a4ff01b 100644
--- a/ocaml/xapi-guard/lib/disk_cache.ml
+++ b/ocaml/xapi-guard/lib/disk_cache.ml
@@ -398,9 +398,9 @@ end = struct
let* failed = retry true in
( if failed then
let elapsed = Mtime_clock.count counter in
- D.debug "%s: Pushed %s after trying for %s" __FUN
+ D.debug "%s: Pushed %s after trying for %a" __FUN
(print_key (uuid, timestamp, key))
- (Fmt.to_to_string Mtime.Span.pp elapsed)
+ Debug.Pp.mtime_span elapsed
) ;
Lwt.return_unit
diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml
index 0ad8b900e3b..fbfc4796220 100644
--- a/ocaml/xapi-storage-script/main.ml
+++ b/ocaml/xapi-storage-script/main.ml
@@ -684,7 +684,7 @@ let vdi_of_volume x =
; snapshot_time= find_string _snapshot_time_key ~default:"19700101T00:00:00Z"
; snapshot_of= Vdi.of_string (find_string _snapshot_of_key ~default:"")
; read_only= not x.Xapi_storage.Control.read_write
- ; cbt_enabled= false
+ ; cbt_enabled= Option.value x.Xapi_storage.Control.cbt_enabled ~default:false
; virtual_size= x.Xapi_storage.Control.virtual_size
; physical_utilisation= x.Xapi_storage.Control.physical_utilisation
; sm_config= []
@@ -1046,6 +1046,10 @@ let bind ~volume_script_dir =
Healthy
| Xapi_storage.Control.Recovering _ ->
Recovering
+ | Xapi_storage.Control.Unreachable _ ->
+ Unreachable
+ | Xapi_storage.Control.Unavailable _ ->
+ Unavailable
)
}
in
@@ -1405,6 +1409,10 @@ let bind ~volume_script_dir =
Healthy
| Xapi_storage.Control.Recovering _ ->
Recovering
+ | Xapi_storage.Control.Unreachable _ ->
+ Unreachable
+ | Xapi_storage.Control.Unavailable _ ->
+ Unavailable
)
}
)
@@ -1545,9 +1553,13 @@ let bind ~volume_script_dir =
@@
let* sr = Attached_SRs.find sr in
let vdi = Storage_interface.Vdi.string_of vdi in
- return_volume_rpc (fun () ->
- Volume_client.data_destroy volume_rpc dbg sr vdi
- )
+ let* response =
+ return_volume_rpc (fun () ->
+ Volume_client.data_destroy volume_rpc dbg sr vdi
+ )
+ in
+ let* () = set ~dbg ~sr ~vdi ~key:_vdi_type_key ~value:"cbt_metadata" in
+ Deferred.Result.return response
in
S.VDI.data_destroy vdi_data_destroy_impl ;
let u name _ = failwith ("Unimplemented: " ^ name) in
diff --git a/ocaml/xapi-storage/generator/lib/control.ml b/ocaml/xapi-storage/generator/lib/control.ml
index f4d8a22a4a5..e34c0183c36 100644
--- a/ocaml/xapi-storage/generator/lib/control.ml
+++ b/ocaml/xapi-storage/generator/lib/control.ml
@@ -28,6 +28,10 @@ type health =
| Healthy of string (** Storage is fully available *)
| Recovering of string
(** Storage is busy recovering, e.g. rebuilding mirrors *)
+ | Unreachable of string
+ (** Storage is unreachable but may be recoverable with admin intervention *)
+ | Unavailable of string
+ (** Storage is unavailable, a host reboot will be required *)
[@@deriving rpcty]
type volume_type =
diff --git a/ocaml/xapi-types/features.ml b/ocaml/xapi-types/features.ml
index 37fafc0905a..6e838f32b83 100644
--- a/ocaml/xapi-types/features.ml
+++ b/ocaml/xapi-types/features.ml
@@ -64,6 +64,7 @@ type feature =
| Updates
| Internal_repo_access
| VTPM
+ | VM_groups
[@@deriving rpc]
type orientation = Positive | Negative
@@ -132,6 +133,7 @@ let keys_of_features =
, ("restrict_internal_repo_access", Negative, "Internal_repo_access")
)
; (VTPM, ("restrict_vtpm", Negative, "VTPM"))
+ ; (VM_groups, ("restrict_vm_groups", Negative, "VM_groups"))
]
(* A list of features that must be considered "enabled" by `of_assoc_list`
diff --git a/ocaml/xapi-types/features.mli b/ocaml/xapi-types/features.mli
index c2f1ed2a51b..bcd1ef4ac66 100644
--- a/ocaml/xapi-types/features.mli
+++ b/ocaml/xapi-types/features.mli
@@ -72,6 +72,7 @@ type feature =
| Internal_repo_access
(** Enable restriction on repository access to pool members only *)
| VTPM (** Support VTPM device required by Win11 guests *)
+ | VM_groups (** Enable use of VM groups *)
val feature_of_rpc : Rpc.t -> feature
(** Convert RPC into {!feature}s *)
diff --git a/ocaml/xapi/api_server.ml b/ocaml/xapi/api_server.ml
index 711655148b3..c5870d8555f 100644
--- a/ocaml/xapi/api_server.ml
+++ b/ocaml/xapi/api_server.ml
@@ -39,6 +39,7 @@ module Actions = struct
module VMPP = Xapi_vmpp
module VMSS = Xapi_vmss
module VM_appliance = Xapi_vm_appliance
+ module VM_group = Xapi_vm_group
module DR_task = Xapi_dr_task
module LVHD = struct end
@@ -250,24 +251,28 @@ let is_host_is_slave_error (response : Rpc.response) =
false
let create_thumbprint_header req response =
- let include_thumbprint =
+ let hash_type_opt =
match
List.assoc_opt
!Xapi_globs.cert_thumbprint_header_request
req.Http.Request.additional_headers
with
- | Some x when x = !Xapi_globs.cert_thumbprint_header_value ->
- true
+ | Some x when x = !Xapi_globs.cert_thumbprint_header_value_sha256 ->
+ Some `Sha256
+ | Some x when x = !Xapi_globs.cert_thumbprint_header_value_sha1 ->
+ Some `Sha1
| _ ->
- false
+ None
in
- if include_thumbprint && is_host_is_slave_error response then
- Helpers.external_certificate_thumbprint_of_master ()
- |> Option.fold ~none:[] ~some:(fun x ->
- [(!Xapi_globs.cert_thumbprint_header_response, x)]
- )
- else
- []
+ Option.bind hash_type_opt (fun hash_type ->
+ if is_host_is_slave_error response then
+ Helpers.external_certificate_thumbprint_of_master ~hash_type
+ else
+ None
+ )
+ |> Option.fold ~none:[] ~some:(fun x ->
+ [(!Xapi_globs.cert_thumbprint_header_response, x)]
+ )
module Unixext = Xapi_stdext_unix.Unixext
diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml
index 0204b7b064a..4f6747762ea 100644
--- a/ocaml/xapi/certificates.ml
+++ b/ocaml/xapi/certificates.ml
@@ -66,7 +66,7 @@ let update_ca_bundle () = Helpers.update_ca_bundle ()
let to_string = function CA_Certificate -> "CA certificate" | CRL -> "CRL"
(** {pp_hash hash} outputs the hexadecimal representation of the {hash}
- adding a semicolon between every octet, in uppercase.
+ adding a colon between every octet, in uppercase.
*)
let pp_hash hash =
let hex = Hex.(show @@ of_cstruct hash) in
@@ -218,13 +218,17 @@ end = struct
let not_before, not_after =
dates_of_ptimes (X509.Certificate.validity certificate)
in
- let fingerprint =
+ let fingerprint_sha256 =
X509.Certificate.fingerprint `SHA256 certificate |> pp_hash
in
+ let fingerprint_sha1 =
+ X509.Certificate.fingerprint `SHA1 certificate |> pp_hash
+ in
let uuid = Uuidx.(to_string (make ())) in
let ref' = Ref.make () in
Db.Certificate.create ~__context ~ref:ref' ~uuid ~host ~not_before
- ~not_after ~fingerprint ~name ~_type ;
+ ~not_after ~fingerprint:fingerprint_sha256 ~fingerprint_sha256
+ ~fingerprint_sha1 ~name ~_type ;
debug "added cert %s under uuid=%s ref=%s" name uuid (Ref.string_of ref') ;
post_action () ;
ref'
diff --git a/ocaml/xapi/certificates_sync.ml b/ocaml/xapi/certificates_sync.ml
index 281f63b2154..735b1a9c936 100644
--- a/ocaml/xapi/certificates_sync.ml
+++ b/ocaml/xapi/certificates_sync.ml
@@ -32,7 +32,9 @@ let install ~__context ~host:_ ~type' cert =
(** determine if the database is up to date by comparing the fingerprint
of xapi-ssl.pem with the entry in the database *)
let is_unchanged ~__context cert_ref cert =
- let ref_hash = Db.Certificate.get_fingerprint ~__context ~self:cert_ref in
+ let ref_hash =
+ Db.Certificate.get_fingerprint_sha256 ~__context ~self:cert_ref
+ in
let cert_hash =
X509.Certificate.fingerprint `SHA256 cert |> Certificates.pp_hash
in
@@ -100,7 +102,7 @@ let update ~__context =
let internal_error fmt =
fmt
- |> Printf.kprintf @@ fun msg ->
+ |> Printf.ksprintf @@ fun msg ->
error "%s" msg ;
raise Api_errors.(Server_error (internal_error, [msg]))
diff --git a/ocaml/xapi/dbsync_master.ml b/ocaml/xapi/dbsync_master.ml
index fbe0dc9273a..8f8e6a582f8 100644
--- a/ocaml/xapi/dbsync_master.ml
+++ b/ocaml/xapi/dbsync_master.ml
@@ -19,6 +19,7 @@ module D = Debug.Make (struct let name = "dbsync" end)
open D
open Client
+open Recommendations
(* Synchronising code which is specific to the master *)
@@ -53,7 +54,7 @@ let create_pool_record ~__context =
~last_update_sync:Xapi_stdext_date.Date.epoch
~update_sync_frequency:`weekly ~update_sync_day:0L
~update_sync_enabled:false ~local_auth_max_threads:8L
- ~ext_auth_max_threads:1L
+ ~ext_auth_max_threads:1L ~recommendations:[]
let set_master_ip ~__context =
let ip =
@@ -339,6 +340,18 @@ let setup_telemetry ~__context =
)
()
+let update_pool_recommendations_noexn ~__context =
+ Helpers.log_exn_continue "update pool recommendations"
+ (fun () ->
+ let pool = Helpers.get_pool ~__context in
+ let recommendations =
+ Recommendations.load ~path:!Xapi_globs.pool_recommendations_dir
+ |> StringMap.bindings
+ in
+ Db.Pool.set_recommendations ~__context ~self:pool ~value:recommendations
+ )
+ ()
+
(* Update the database to reflect current state. Called for both start of day and after
an agent restart. *)
let update_env __context =
@@ -363,4 +376,5 @@ let update_env __context =
Storage_access.on_xapi_start ~__context ;
if !Xapi_globs.create_tools_sr then
create_tools_sr_noexn __context ;
- ensure_vm_metrics_records_exist_noexn __context
+ ensure_vm_metrics_records_exist_noexn __context ;
+ update_pool_recommendations_noexn ~__context
diff --git a/ocaml/xapi/dune b/ocaml/xapi/dune
index 6575b66aea5..7492df39e68 100644
--- a/ocaml/xapi/dune
+++ b/ocaml/xapi/dune
@@ -84,6 +84,7 @@
pam
pciutil
pci
+ psq
ptime
rpclib.core
rpclib.json
diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml
index ba58ddd7b92..69e2ba3ce24 100644
--- a/ocaml/xapi/helpers.ml
+++ b/ocaml/xapi/helpers.ml
@@ -2041,30 +2041,35 @@ let update_ca_bundle =
)
)
-let external_certificate_thumbprint_of_master ?(hash_type = `Sha256) () =
- match hash_type with
- | `Sha256 ->
- Server_helpers.exec_with_new_task
- "Get master's external certificate thumbprint" (fun __context ->
- let master_ref = get_master ~__context in
- let certs =
- Db.Certificate.get_records_where ~__context
- ~expr:
- (And
- ( Eq (Field "host", Literal (Ref.string_of master_ref))
- , Eq (Field "type", Literal "host")
- )
- )
- in
- match certs with
- | [] ->
- debug "Failed to fetch master's external certificate" ;
- None
- | (_, cert_record) :: _ ->
- Some cert_record.certificate_fingerprint
- )
- | _ ->
- None
+let external_certificate_thumbprint_of_master ~hash_type =
+ if List.mem hash_type [`Sha256; `Sha1] then
+ Server_helpers.exec_with_new_task
+ "Get master's external certificate thumbprint" (fun __context ->
+ let master_ref = get_master ~__context in
+ let certs =
+ Db.Certificate.get_records_where ~__context
+ ~expr:
+ (And
+ ( Eq (Field "host", Literal (Ref.string_of master_ref))
+ , Eq (Field "type", Literal "host")
+ )
+ )
+ in
+ match certs with
+ | [] ->
+ debug "%s: Failed to fetch master's external certificate"
+ __FUNCTION__ ;
+ None
+ | (_, cert_record) :: _ -> (
+ match hash_type with
+ | `Sha256 ->
+ Some cert_record.certificate_fingerprint_sha256
+ | `Sha1 ->
+ Some cert_record.certificate_fingerprint_sha1
+ )
+ )
+ else
+ None
let unit_test ~__context : bool =
Pool_role.is_unit_test ()
diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml
index d695b94469a..bc9d3e1db0b 100644
--- a/ocaml/xapi/import.ml
+++ b/ocaml/xapi/import.ml
@@ -1832,7 +1832,7 @@ module VTPM : HandlerTools = struct
type precheck_t = Import of vtpm'
let fail fmt =
- Printf.kprintf
+ Printf.ksprintf
(fun msg -> raise Api_errors.(Server_error (import_error_generic, [msg])))
fmt
diff --git a/ocaml/xapi/importexport.ml b/ocaml/xapi/importexport.ml
index 12562e53c1b..869aac2a5f0 100644
--- a/ocaml/xapi/importexport.ml
+++ b/ocaml/xapi/importexport.ml
@@ -495,7 +495,7 @@ module Devicetype = struct
| "vtpm" ->
VTPM
| other ->
- let fail fmt = Printf.kprintf failwith fmt in
+ let fail fmt = Printf.ksprintf failwith fmt in
fail "%s: Type '%s' not one of [%s]" __FUNCTION__ other
(String.concat "; " (List.map to_string all))
end
diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml
index 6be9f50d4c0..935ed8cf7e8 100644
--- a/ocaml/xapi/message_forwarding.ml
+++ b/ocaml/xapi/message_forwarding.ml
@@ -416,6 +416,17 @@ functor
Ref.string_of vm_appliance
with _ -> "invalid"
+ let vm_group_uuid ~__context vm_group =
+ try
+ if Pool_role.is_master () then
+ let name = Db.VM_group.get_name_label ~__context ~self:vm_group in
+ Printf.sprintf "%s%s"
+ (Db.VM_group.get_uuid ~__context ~self:vm_group)
+ (add_brackets name)
+ else
+ Ref.string_of vm_group
+ with _ -> "invalid"
+
let sr_uuid ~__context sr =
try
if Pool_role.is_master () then
@@ -3017,6 +3028,15 @@ functor
(vm_appliance_uuid ~__context value) ;
Local.VM.set_appliance ~__context ~self ~value
+ let set_groups ~__context ~self ~value =
+ info "VM.set_groups : self = '%s'; value = [ %s ]"
+ (vm_uuid ~__context self)
+ (String.concat "; " (List.map (vm_group_uuid ~__context) value)) ;
+ let original_groups = Db.VM.get_groups ~__context ~self in
+ Local.VM.set_groups ~__context ~self ~value ;
+ Xapi_vm_group_helpers.update_vm_anti_affinity_alert ~__context
+ ~groups:(original_groups @ value)
+
let import_convert ~__context ~_type ~username ~password ~sr
~remote_config =
info "VM.import_convert: type = '%s'; remote_config = '%s;'" _type
@@ -6568,6 +6588,23 @@ functor
)
end
+ module VM_group = struct
+ let create ~__context ~name_label ~name_description ~placement =
+ info
+ "VM_group.create: name_label = '%s'; name_description = '%s'; \
+ placement = '%s'"
+ name_label name_description
+ (Record_util.vm_placement_policy_to_string placement) ;
+ Local.VM_group.create ~__context ~name_label ~name_description
+ ~placement
+
+ let destroy ~__context ~self =
+ info "VM_group.destroy: self = '%s'" (vm_group_uuid ~__context self) ;
+ Xapi_vm_group_helpers.remove_vm_anti_affinity_alert ~__context
+ ~groups:[self] ;
+ Local.VM_group.destroy ~__context ~self
+ end
+
module Observer = struct
module RefSet = Set.Make (struct
type t = [`host] Ref.t
diff --git a/ocaml/xapi/pool_features.ml b/ocaml/xapi/pool_features.ml
index d3a14dd5221..8821224872a 100644
--- a/ocaml/xapi/pool_features.ml
+++ b/ocaml/xapi/pool_features.ml
@@ -13,22 +13,14 @@
open Features
-module D = Debug.Make (struct let name = "pool_features" end)
-
-open D
-
(*
Terminology:
- (Feature) flags: The keys in pool.restriction and host.license_params. Strings like "restrict_dmc".
- Params: An instance of host.license_params.
- Restrictions: A (string * string) list of feature flag to a Boolean string value ("true" or "false").
- Features: Values of type Features.feature.
- - Core: Relating to features known by xapi, as define in features.ml.
- - Additional: Relating to features provided by v6d beyond the core ones.
*)
-let all_flags = List.map (fun (k, _) -> k) (to_assoc_list all_features)
-
let get_pool_features ~__context =
let pool = Helpers.get_pool ~__context in
of_assoc_list (Db.Pool.get_restrictions ~__context ~self:pool)
@@ -43,78 +35,3 @@ let assert_enabled ~__context ~f =
(Api_errors.Server_error
(Api_errors.license_restriction, [name_of_feature f])
)
-
-(* The set of core restrictions of a pool is the intersection of the sets of features
- of the individual hosts. *)
-let compute_core_features all_host_params =
- List.map of_assoc_list all_host_params
- |> List.fold_left Xapi_stdext_std.Listext.List.intersect all_features
-
-(* Find the feature flags in the given license params that are not represented
- in the feature type. These are additional flags given to us by v6d.
- Assume that their names always start with "restrict_". *)
-let find_additional_flags params =
- let kvs =
- List.filter
- (fun (k, _) ->
- try String.sub k 0 9 = "restrict_" && not (List.mem k all_flags)
- with Invalid_argument _ -> false
- )
- params
- in
- List.map fst kvs
-
-(* Determine the set of additional features. For each restrict_ flag,
- looks for matching flags on all hosts; if one of them is restricted ("true")
- or absent, then the feature on the pool level is marked as restricted. *)
-let rec compute_additional_restrictions all_host_params = function
- | [] ->
- []
- | flag :: rest ->
- let switches =
- List.map
- (function
- | params ->
- if List.mem_assoc flag params then
- bool_of_string (List.assoc flag params)
- else
- true
- )
- all_host_params
- in
- (flag, string_of_bool (List.fold_left ( || ) false switches))
- :: compute_additional_restrictions all_host_params rest
-
-(* Combine the host-level feature restrictions into pool-level ones, and write
- the result to the database. *)
-let update_pool_features ~__context =
- (* Get information from the database *)
- let pool = Helpers.get_pool ~__context in
- let old_restrictions = Db.Pool.get_restrictions ~__context ~self:pool in
- let all_host_params =
- List.map
- (fun (_, host_r) -> host_r.API.host_license_params)
- (Db.Host.get_all_records ~__context)
- in
- let master_params =
- let master_ref = Db.Pool.get_master ~__context ~self:pool in
- Db.Host.get_license_params ~__context ~self:master_ref
- in
- (* Determine the set of core restrictions *)
- let new_core_features = compute_core_features all_host_params in
- let new_core_restrictions = to_assoc_list new_core_features in
- (* Determine the set of additional restrictions *)
- let additional_flags = find_additional_flags master_params in
- let new_additional_restrictions =
- compute_additional_restrictions all_host_params additional_flags
- in
- (* The complete set of restrictions is formed by the core feature plus the additional features *)
- let new_restrictions = new_additional_restrictions @ new_core_restrictions in
- (* Update the DB if the restrictions have changed *)
- if new_restrictions <> old_restrictions then (
- let old_core_features = of_assoc_list old_restrictions in
- info "Old pool features enabled: %s" (to_compact_string old_core_features) ;
- info "New pool features enabled: %s" (to_compact_string new_core_features) ;
- Db.Pool.set_restrictions ~__context ~self:pool ~value:new_restrictions ;
- Xapi_pool_helpers.apply_guest_agent_config ~__context
- )
diff --git a/ocaml/xapi/pool_features.mli b/ocaml/xapi/pool_features.mli
index 714c92ca757..9e4cbcef405 100644
--- a/ocaml/xapi/pool_features.mli
+++ b/ocaml/xapi/pool_features.mli
@@ -20,6 +20,3 @@ val is_enabled : __context:Context.t -> Features.feature -> bool
val assert_enabled : __context:Context.t -> f:Features.feature -> unit
(** Raise appropriate exception if feature is not enabled. *)
-
-val update_pool_features : __context:Context.t -> unit
-(** Update the pool-level restrictions list in the database. *)
diff --git a/ocaml/xapi/pool_features_helpers.ml b/ocaml/xapi/pool_features_helpers.ml
new file mode 100644
index 00000000000..dda8619013c
--- /dev/null
+++ b/ocaml/xapi/pool_features_helpers.ml
@@ -0,0 +1,108 @@
+(*
+ * Copyright (c) 2024 Cloud Software Group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+open Features
+
+module D = Debug.Make (struct let name = "pool_features_helpers" end)
+
+open D
+
+(*
+ Terminology:
+ - (Feature) flags: The keys in pool.restriction and host.license_params. Strings like "restrict_dmc".
+ - Params: An instance of host.license_params.
+ - Restrictions: A (string * string) list of feature flag to a Boolean string value ("true" or "false").
+ - Features: Values of type Features.feature.
+ - Core: Relating to features known by xapi, as defined in features.ml.
+ - Additional: Relating to features provided by v6d beyond the core ones.
+*)
+
+let all_flags = List.map (fun (k, _) -> k) (to_assoc_list all_features)
+
+(* The set of core restrictions of a pool is the intersection of the sets of features
+ of the individual hosts. *)
+let compute_core_features all_host_params =
+ List.map of_assoc_list all_host_params
+ |> List.fold_left Xapi_stdext_std.Listext.List.intersect all_features
+
+(* Find the feature flags in the given license params that are not represented
+ in the feature type. These are additional flags given to us by v6d.
+ Assume that their names always start with "restrict_". *)
+let find_additional_flags params =
+ let kvs =
+ List.filter
+ (fun (k, _) ->
+ try String.sub k 0 9 = "restrict_" && not (List.mem k all_flags)
+ with Invalid_argument _ -> false
+ )
+ params
+ in
+ List.map fst kvs
+
+(* Determine the set of additional features. For each restrict_ flag,
+ looks for matching flags on all hosts; if one of them is restricted ("true")
+ or absent, then the feature on the pool level is marked as restricted. *)
+let rec compute_additional_restrictions all_host_params = function
+ | [] ->
+ []
+ | flag :: rest ->
+ let switches =
+ List.map
+ (function
+ | params ->
+ if List.mem_assoc flag params then
+ bool_of_string (List.assoc flag params)
+ else
+ true
+ )
+ all_host_params
+ in
+ (flag, string_of_bool (List.fold_left ( || ) false switches))
+ :: compute_additional_restrictions all_host_params rest
+
+(* Combine the host-level feature restrictions into pool-level ones, and write
+ the result to the database. *)
+let update_pool_features ~__context =
+ (* Get information from the database *)
+ let pool = Helpers.get_pool ~__context in
+ let old_restrictions = Db.Pool.get_restrictions ~__context ~self:pool in
+ let all_host_params =
+ List.map
+ (fun (_, host_r) -> host_r.API.host_license_params)
+ (Db.Host.get_all_records ~__context)
+ in
+ let master_params =
+ let master_ref = Db.Pool.get_master ~__context ~self:pool in
+ Db.Host.get_license_params ~__context ~self:master_ref
+ in
+ (* Determine the set of core restrictions *)
+ let new_core_features = compute_core_features all_host_params in
+ let new_core_restrictions = to_assoc_list new_core_features in
+ (* Determine the set of additional restrictions *)
+ let additional_flags = find_additional_flags master_params in
+ let new_additional_restrictions =
+ compute_additional_restrictions all_host_params additional_flags
+ in
+ (* The complete set of restrictions is formed by the core feature plus the additional features *)
+ let new_restrictions = new_additional_restrictions @ new_core_restrictions in
+ (* Update the DB if the restrictions have changed *)
+ if new_restrictions <> old_restrictions then (
+ let old_core_features = of_assoc_list old_restrictions in
+ info "Old pool features enabled: %s" (to_compact_string old_core_features) ;
+ info "New pool features enabled: %s" (to_compact_string new_core_features) ;
+ Db.Pool.set_restrictions ~__context ~self:pool ~value:new_restrictions ;
+ Xapi_vm_group_helpers.maybe_update_alerts_on_feature_change ~__context
+ ~old_restrictions ~new_restrictions ;
+ Xapi_pool_helpers.apply_guest_agent_config ~__context
+ )
diff --git a/ocaml/xapi/pool_features_helpers.mli b/ocaml/xapi/pool_features_helpers.mli
new file mode 100755
index 00000000000..d5d610a3544
--- /dev/null
+++ b/ocaml/xapi/pool_features_helpers.mli
@@ -0,0 +1,15 @@
+(*
+ * Copyright (c) 2024 Cloud Software Group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+val update_pool_features : __context:Context.t -> unit
diff --git a/ocaml/xapi/recommendations.ml b/ocaml/xapi/recommendations.ml
new file mode 100644
index 00000000000..be35ba1316a
--- /dev/null
+++ b/ocaml/xapi/recommendations.ml
@@ -0,0 +1,50 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module Unixext = Xapi_stdext_unix.Unixext
+module Config_file = Xcp_service.Config_file
+
+module D = Debug.Make (struct let name = "recommendations" end)
+
+open D
+module StringMap = Map.Make (String)
+
+let process_line map data =
+ match Config_file.parse_line data with
+ | Some (k, v) ->
+ debug "Parsing data, key: %s, value: %s" k v ;
+ StringMap.add k v map
+ | None ->
+ map
+
+let parse map filename =
+ debug "Parsing recommendations file: %s" filename ;
+ Unixext.file_lines_fold process_line map filename
+
+let load ~path =
+ (try Sys.readdir path with _ -> [||])
+ |> Array.to_list
+ |> List.filter (fun f -> Filename.check_suffix f ".conf")
+ |> List.stable_sort compare
+ |> List.map (Filename.concat path)
+ |> List.filter (fun f ->
+ match Unix.((stat f).st_kind) with
+ | Unix.S_REG ->
+ true
+ | _ ->
+ false
+ | exception _ ->
+ false
+ )
+ |> List.fold_left parse StringMap.empty
diff --git a/ocaml/xapi/recommendations.mli b/ocaml/xapi/recommendations.mli
new file mode 100644
index 00000000000..a97a4a39c6d
--- /dev/null
+++ b/ocaml/xapi/recommendations.mli
@@ -0,0 +1,17 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module StringMap : Map.S with type key = string
+
+val load : path:string -> string StringMap.t
diff --git a/ocaml/xapi/taskHelper.ml b/ocaml/xapi/taskHelper.ml
index abe7f4b4599..b8e9d8b3ca2 100644
--- a/ocaml/xapi/taskHelper.ml
+++ b/ocaml/xapi/taskHelper.ml
@@ -53,6 +53,16 @@ let rbac_assert_permission_fn = ref None
(* required to break dep-cycle with rbac.ml *)
+let are_auth_user_ids_of_sessions_equal ~__context s1 s2 =
+ Context.with_tracing ~__context __FUNCTION__ @@ fun __context ->
+ let s1_auth_user_sid =
+ Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:s1
+ in
+ let s2_auth_user_sid =
+ Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:s2
+ in
+ s1_auth_user_sid = s2_auth_user_sid
+
let assert_op_valid ?(ok_if_no_session_in_context = false) ~__context task_id =
let assert_permission_task_op_any () =
match !rbac_assert_permission_fn with
@@ -77,19 +87,14 @@ let assert_op_valid ?(ok_if_no_session_in_context = false) ~__context task_id =
| Some context_session ->
let is_own_task =
try
- let task_session =
- Db_actions.DB_Action.Task.get_session ~__context ~self:task_id
- in
- let task_auth_user_sid =
- Db_actions.DB_Action.Session.get_auth_user_sid ~__context
- ~self:task_session
- in
- let context_auth_user_sid =
- Db_actions.DB_Action.Session.get_auth_user_sid ~__context
- ~self:context_session
- in
- (*debug "task_auth_user_sid=%s,context_auth_user_sid=%s" task_auth_user_sid context_auth_user_sid;*)
- task_auth_user_sid = context_auth_user_sid
+ (* If the task id is the same as the context's task id, we don't need
+ to go through their respective sessions.*)
+ match Context.get_task_id __context = task_id with
+ | true ->
+ true
+ | false ->
+ are_auth_user_ids_of_sessions_equal ~__context context_session
+ (Db_actions.DB_Action.Task.get_session ~__context ~self:task_id)
with e ->
debug "assert_op_valid: %s" (ExnHelper.string_of_exn e) ;
false
diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml
index c31ed490a0d..ad4f35e37ed 100644
--- a/ocaml/xapi/xapi_globs.ml
+++ b/ocaml/xapi/xapi_globs.ml
@@ -951,7 +951,12 @@ let ignore_vtpm_unimplemented = ref false
let evacuation_batch_size = ref 10
-type xapi_globs_spec_ty = Float of float ref | Int of int ref
+type xapi_globs_spec =
+ | Float of float ref
+ | Int of int ref
+ | ShortDurationFromSeconds of Mtime.Span.t ref
+ (** From float, max of 104 days *)
+ | LongDurationFromSeconds of Mtime.Span.t ref (** From int *)
let extauth_ad_backend = ref "winbind"
@@ -1015,7 +1020,9 @@ let max_observer_file_size = ref (1 lsl 20)
let cert_thumbprint_header_request =
ref "x-xenapi-request-host-certificate-thumbprint"
-let cert_thumbprint_header_value = ref "sha-256:master"
+let cert_thumbprint_header_value_sha256 = ref "sha-256:master"
+
+let cert_thumbprint_header_value_sha1 = ref "sha-1:master"
let cert_thumbprint_header_response =
ref "x-xenapi-response-host-certificate-thumbprint"
@@ -1029,6 +1036,8 @@ let python3_path = ref "/usr/bin/python3"
let observer_experimental_components =
ref (StringSet.singleton Constants.observer_component_smapi)
+let pool_recommendations_dir = ref "/etc/xapi.pool-recommendations.d"
+
let disable_webserver = ref false
let xapi_globs_spec =
@@ -1120,13 +1129,41 @@ let options_of_xapi_globs_spec =
List.map
(fun (name, ty) ->
( name
- , (match ty with Float x -> Arg.Set_float x | Int x -> Arg.Set_int x)
+ , ( match ty with
+ | Float x ->
+ Arg.Set_float x
+ | Int x ->
+ Arg.Set_int x
+ | ShortDurationFromSeconds x ->
+ Arg.Float
+ (fun y ->
+ match Clock.Timer.s_to_span y with
+ | Some y ->
+ x := y
+ | None ->
+ D.warn
+ "Ignoring argument '%s', invalid float being used: %f. \
+ (it only allows durations of less than 104 days)"
+ name y
+ )
+ | LongDurationFromSeconds x ->
+ Arg.Int (fun y -> x := Mtime.Span.(y * s))
+ )
, (fun () ->
match ty with
| Float x ->
string_of_float !x
| Int x ->
string_of_int !x
+ | ShortDurationFromSeconds x ->
+ let literal =
+ Mtime.Span.to_uint64_ns !x |> fun ns ->
+ Int64.div ns 1_000_000_000L |> Int64.to_int |> string_of_int
+ in
+ Fmt.str "%s (%a)" literal Mtime.Span.pp !x
+ | LongDurationFromSeconds x ->
+ let literal = Clock.Timer.span_to_s !x |> string_of_float in
+ Fmt.str "%s (%a)" literal Mtime.Span.pp !x
)
, Printf.sprintf "Set the value of '%s'" name
)
@@ -1821,6 +1858,10 @@ module Resources = struct
, trace_log_dir
, "Directory for storing traces exported to logs"
)
+ ; ( "pool-recommendations-dir"
+ , pool_recommendations_dir
+ , "Directory containing files with recommendations in key=value format"
+ )
]
let xcp_resources =
diff --git a/ocaml/xapi/xapi_ha_vm_failover.ml b/ocaml/xapi/xapi_ha_vm_failover.ml
index 4fbf46860f2..4aa9ee17128 100644
--- a/ocaml/xapi/xapi_ha_vm_failover.ml
+++ b/ocaml/xapi/xapi_ha_vm_failover.ml
@@ -175,9 +175,6 @@ let order_f (_, vm_rec) =
let ( $ ) x y = x y
-(*****************************************************************************************************)
-(* Planning code follows *)
-
(* Compute the total memory required of a VM (Running or not) *)
let total_memory_of_vm ~__context policy snapshot =
let main, shadow =
@@ -185,50 +182,418 @@ let total_memory_of_vm ~__context policy snapshot =
in
Int64.add main shadow
-(** Return a VM -> Host plan for the Host.evacuate code. We assume the VMs are all agile. The returned plan may
- be incomplete if there was not enough memory. *)
-let compute_evacuation_plan ~__context total_hosts remaining_hosts
- vms_and_snapshots =
- let hosts =
- List.map
- (fun host ->
- ( host
- , Memory_check.host_compute_free_memory_with_maximum_compression
- ~__context ~host None
- )
- )
- remaining_hosts
+let host_free_memory ~__context ~host =
+ Memory_check.host_compute_free_memory_with_maximum_compression ~__context
+ ~host None
+
+let vm_memory ~__context snapshot =
+ let policy =
+ match Helpers.check_domain_type snapshot.API.vM_domain_type with
+ | `hvm | `pv ->
+ Memory_check.Dynamic_min
+ | `pv_in_pvh | `pvh ->
+ Memory_check.Static_max
in
- let vms =
- List.map
- (fun (vm, snapshot) ->
- let policy =
- match Helpers.check_domain_type snapshot.API.vM_domain_type with
- | `hvm | `pv ->
- Memory_check.Dynamic_min
- | `pv_in_pvh | `pvh ->
- Memory_check.Static_max
+ total_memory_of_vm ~__context policy snapshot
+
+module VMGrpRefOrd = struct
+ type t = [`VM_group] Ref.t
+
+ let compare = Ref.compare
+end
+
+module VMGrpMap = Map.Make (VMGrpRefOrd)
+
+module HostKey = struct
+ type t = [`host] Ref.t
+
+ let compare = Ref.compare
+end
+
+(* For a VM anti-affinity group, the state of a host which determines
+ evacuation planning for anti-affinity VMs in that group:
+ 1. vm_cnt: the number of running VMs in that group resident on the host
+ 2. h_size: the amount of free memory of the host *)
+module HostStatistics = struct
+ type t = {vm_cnt: int; h_size: int64}
+
+ (* During evacuation planning for anti-affinity VMs, "vm_cnt" is the first
+ factor considered, "h_size" is the second factor considered:
+ Let's say the next anti-affinity VM to be planned belongs to group A, the
+ host to be selected should be the one which has minimal "vm_cnt" of group
+ A, for hosts with the same "vm_cnt", pick the one with the minimal
+ "h_size" which can hold the VM(h_size >= vm_size). *)
+ let compare {vm_cnt= vm_cnt_0; h_size= h_size_0}
+ {vm_cnt= vm_cnt_1; h_size= h_size_1} =
+ match Int.compare vm_cnt_0 vm_cnt_1 with
+ | 0 ->
+ Int64.compare h_size_0 h_size_1
+ | c ->
+ c
+end
+
+(* A Psq of hosts for an anti-affinity group, which is used for evacuation
+ planning for anti-affinity VMs in that group: the minimal host in this Psq
+ is the first one to be considered to plan a VM from that group.
+ When several hosts share the minimal "HostStatistics",
+ the minimal host is the host with the smallest ref. *)
+module AntiAffEvacPlanHostPsq = Psq.Make (HostKey) (HostStatistics)
+
+(* The spread evenly plan pool state determines the spread evenly evacuation
+ planning for anti-affinity VMs.
+ It's a VMGrpMap which contains a Psq for each group, and each Psq contains
+ all the available hosts in the pool.
+ Let's say the anti-affinity VM to be planned belongs to anti-affinity group
+ A. To get a spread evenly evacuation plan, the most suitable host to plan
+ the VM would be the host which has the minimal number of running VMs from
+ group A resident on it, for the hosts with the same number of running VMs
+ from group A, the one with the minimal free memory will be checked first,
+ which is just the minimal host returned from "Psq.min" on the Psq of group
+ A. *)
+let init_spread_evenly_plan_pool_state ~__context anti_aff_vms hosts =
+ let module Q = AntiAffEvacPlanHostPsq in
+ let gen_psq grp =
+ let module H = Xapi_vm_helpers in
+ let host_vm_cnt = H.host_to_vm_count_map ~__context grp in
+ List.fold_left
+ (fun q (h, h_size) ->
+ let vm_cnt =
+ H.HostMap.find_opt h host_vm_cnt |> Option.value ~default:0
in
- (vm, total_memory_of_vm ~__context policy snapshot)
+ Q.add h {vm_cnt; h_size} q
)
- vms_and_snapshots
+ Q.empty hosts
in
+ let module VMGrpSet = Set.Make (VMGrpRefOrd) in
+ anti_aff_vms |> List.map (fun (_, _, grp) -> grp) |> VMGrpSet.of_list
+ |> fun s ->
+ VMGrpSet.fold
+ (fun grp grp_psq -> VMGrpMap.add grp (gen_psq grp) grp_psq)
+ s VMGrpMap.empty
+
+(* Update "spread_evenly_plan_pool_state" after a VM from anti-affinity "group"
+ with memory size: "vm_size" is planned on the "host":
+ 1. For the "group", increase "vm_cnt" of the "host" by 1.
+ 2. For each group, update the host's size by subtracting "vm_size". *)
+let update_spread_evenly_plan_pool_state vm_size group host pool_state =
+ let module Q = AntiAffEvacPlanHostPsq in
+ VMGrpMap.mapi
+ (fun grp hosts_q ->
+ Q.adjust host
+ (fun {vm_cnt; h_size} ->
+ let h_size = Int64.sub h_size vm_size in
+ let vm_cnt = vm_cnt + if grp = group then 1 else 0 in
+ {vm_cnt; h_size}
+ )
+ hosts_q
+ )
+ pool_state
+
+(* The no breach plan pool state determines the no breach evacuation planning
+ for anti-affinity VMs.
+ It's a VMGrpMap which contains "no breach plan state" for each VM anti-
+ affinity group.
+ "no breach plan state" has 2 elements:
+ 1. a Psq which contains "no_resident" hosts for that group. (A "no
+ resident" host for a group is a host which has no running VMs from that
+ group resident on it.)
+ 2. an int which is the number of "resident" hosts for each group. (A
+ "resident" host for a group is a host which has at least one running VM
+ from that group resident on it.)
+ Let's say the anti-affinity VM to be planned belongs to anti-affinity group
+ A. If for group A, the number of "resident" hosts is already 2 or greater
+ than 2, then we don't need to plan the VM on any host, if not, we will need
+ to check the host with the minimal free memory from the "no resident" hosts
+ queue, which is just the minimal host returned from "Psq.min" on the "no
+ resident" hosts Psq of group A. *)
+let init_no_breach_plan_pool_state spread_evenly_plan_pool_state =
+ let module Q = AntiAffEvacPlanHostPsq in
+ spread_evenly_plan_pool_state
+ |> VMGrpMap.map (fun hs ->
+ let no_resident_hosts, resident_hosts =
+ Q.partition (fun _ {vm_cnt; _} -> vm_cnt = 0) hs
+ in
+ (no_resident_hosts, Q.size resident_hosts)
+ )
+
+(* Update "no_breach_plan_pool_state" after a VM from anti-affinity "group"
+ with memory size: "vm_size" is planned on the "host":
+ 1. For the "group", the "host" is removed from its "no_resident" hosts
+ queue, and increase its "resident_hosts_cnt" by 1.
+ 2. For other groups, update the host's size by subtracting "vm_size" if
+ the host is in that group's "no_resident" hosts queue. *)
+let update_no_breach_plan_pool_state vm_size group host pool_state =
+ let module Q = AntiAffEvacPlanHostPsq in
+ VMGrpMap.mapi
+ (fun grp (no_resident_hosts, resident_hosts_cnt) ->
+ match grp = group with
+ | true ->
+ (Q.remove host no_resident_hosts, succ resident_hosts_cnt)
+ | false ->
+ let open HostStatistics in
+ ( Q.update host
+ (Option.map (fun {vm_cnt; h_size} ->
+ {vm_cnt; h_size= Int64.sub h_size vm_size}
+ )
+ )
+ no_resident_hosts
+ , resident_hosts_cnt
+ )
+ )
+ pool_state
+
+(* For an anti-affinity group, select host for a VM of memory size: vm_size
+ from hosts Psq: hosts_psq, returns the selected host for the VM and the
+ available hosts Psq for the remaining VMs to be planned in that anti-
+ affinity group. *)
+let rec select_host_for_anti_aff_evac_plan vm_size hosts_psq =
+ let module Q = AntiAffEvacPlanHostPsq in
+ match Q.pop hosts_psq with
+ | None ->
+ None
+ | Some ((host, {vm_cnt= _; h_size}), rest_hosts) -> (
+ match vm_size <= h_size with
+ | true ->
+ (* "host", the minimal one in "hosts_psq", might still be able to hold
+ the next VM: if its free memory, after the current VM is placed on
+ it, is still larger than the size of the next VM *)
+ Some (host, hosts_psq)
+ | false ->
+ (* "host" will not be available for the remaining VMs as the anti-
+ affinity VMs to be planned are sorted increasingly in terms of their
+ size, since the host can't hold current VM, it will not be able to
+ hold the next VM. *)
+ select_host_for_anti_aff_evac_plan vm_size rest_hosts
+ )
+
+let impossible_error_handler () =
+ let msg = "Data corrupted during host evacuation." in
+ error "%s" msg ;
+ raise (Api_errors.Server_error (Api_errors.internal_error, [msg]))
+
+(*****************************************************************************************************)
+(* Planning code follows *)
+
+(* Try to get a spread evenly plan for anti-affinity VMs (for each anti-
+ affinity group, the number of running VMs from that group are spread evenly
+ in all the rest hosts in the pool):
+ 1. For all the anti-affinity VMs which are sorted in increasing order in
+ terms of the VM's memory size, do host selection as below step 2.
+ 2. For each anti-affinity VM, select a host which can run it, and which has
+ the minimal number of VMs in the same anti-affinity group running on it,
+ for the hosts with the same number of running VMs in that group, pick the
+ one with the minimal free memory. *)
+let compute_spread_evenly_plan ~__context pool_state anti_aff_vms =
+ info "compute_spread_evenly_plan" ;
+ List.fold_left
+ (fun (acc_mapping, acc_pool_state) (vm, vm_size, group) ->
+ debug "Spread evenly plan: try to plan for anti-affinity VM (%s %s %s)."
+ (Ref.string_of vm)
+ (Db.VM.get_name_label ~__context ~self:vm)
+ (Db.VM_group.get_name_label ~__context ~self:group) ;
+ match
+ VMGrpMap.find group acc_pool_state
+ |> select_host_for_anti_aff_evac_plan vm_size
+ with
+ | None ->
+ debug
+ "Spread evenly plan: no host can hold this anti-affinity VM. Stop \
+ the planning as there won't be a valid plan for this VM." ;
+ ([], VMGrpMap.empty)
+ | Some (h, avail_hosts_for_group) ->
+ debug
+ "Spread evenly plan: choose the host with the minimal free memory \
+ which can run the vm: (%s %s)."
+ (Ref.string_of h)
+ (Db.Host.get_name_label ~__context ~self:h) ;
+ ( (vm, h) :: acc_mapping
+ , acc_pool_state
+ |> VMGrpMap.update group (fun _ -> Some avail_hosts_for_group)
+ |> update_spread_evenly_plan_pool_state vm_size group h
+ )
+ | exception Not_found ->
+ impossible_error_handler ()
+ )
+ ([], pool_state) anti_aff_vms
+ |> fst
+
+(* Try to get a no breach plan for anti-affinity VMs (for each anti-affinity
+ group, there are at least 2 hosts having running VMs in the group):
+ 1. For all the anti-affinity VMs which are sorted in increasing order in
+ terms of the VM's memory size, do host selection as below step 2.
+ 2. For each anti-affinity VM, try to select a host for it so that there are
+ at least 2 hosts which have running VMs in the same anti-affinity group.
+ If there are already 2 hosts having running VMs in that group, skip
+ planning for the VM. *)
+let compute_no_breach_plan ~__context pool_state anti_aff_vms =
+ info "compute_no_breach_plan" ;
+ List.fold_left
+ (fun (acc_mapping, acc_not_planned_vms, acc_pool_state) (vm, vm_size, group) ->
+ debug "No breach plan: try to plan for anti-affinity VM (%s %s %s)."
+ (Ref.string_of vm)
+ (Db.VM.get_name_label ~__context ~self:vm)
+ (Db.VM_group.get_name_label ~__context ~self:group) ;
+
+ match VMGrpMap.find group acc_pool_state with
+ | no_resident_hosts, resident_hosts_cnt when resident_hosts_cnt < 2 -> (
+ debug
+ "No breach plan: there are less than 2 hosts has running VM in the \
+ same anti-affinity group, and there are still host(s) which has 0 \
+ running VMs, try to plan for it." ;
+ match
+ select_host_for_anti_aff_evac_plan vm_size no_resident_hosts
+ with
+ | None ->
+ debug
+ "No breach plan: failed to select host on any of the no \
+ resident hosts, skip it, continue with the next VM." ;
+ (acc_mapping, (vm, vm_size) :: acc_not_planned_vms, acc_pool_state)
+ | Some (h, hosts) ->
+ debug
+ "No breach plan: choose the no resident host with the minimal \
+ free memory which can run the vm: (%s)."
+ (Db.Host.get_name_label ~__context ~self:h) ;
+ ( (vm, h) :: acc_mapping
+ , acc_not_planned_vms
+ , acc_pool_state
+ |> VMGrpMap.update group (Option.map (fun (_, i) -> (hosts, i)))
+ |> update_no_breach_plan_pool_state vm_size group h
+ )
+ )
+ | exception Not_found ->
+ impossible_error_handler ()
+ | _ ->
+ debug
+ "No breach plan: no need to plan for the VM as the number of hosts \
+ which has running VMs from the same group is no less than 2, \
+ continue to plan for the next one." ;
+ (acc_mapping, (vm, vm_size) :: acc_not_planned_vms, acc_pool_state)
+ )
+ ([], [], pool_state) anti_aff_vms
+ |> fun (plan, not_planned_vms, _) -> (plan, not_planned_vms)
+
+let vms_partition ~__context vms =
+ vms
+ |> List.partition_map (fun (vm, vm_size) ->
+ match Xapi_vm_helpers.vm_has_anti_affinity ~__context ~vm with
+ | Some (`AntiAffinity group) ->
+ Either.Left (vm, vm_size, group)
+ | _ ->
+ Either.Right (vm, vm_size)
+ )
+
+(* Return an evacuation plan respecting VM anti-affinity rules: it is done in 3
+ phases:
+ 1. Try to get a "spread evenly" plan for anti-affinity VMs, and then a
+ binpack plan for the rest of VMs. Done if every VM got planned, otherwise
+ continue.
+ 2. Try to get a "no breach" plan for anti-affinity VMs, and then a binpack
+ plan for the rest of VMs. Done if every VM got planned, otherwise
+ continue.
+ 3. Carry out a binpack plan ignoring VM anti-affinity. *)
+let compute_anti_aff_evac_plan ~__context total_hosts hosts vms =
let config =
{Binpack.hosts; vms; placement= []; total_hosts; num_failures= 1}
in
Binpack.check_configuration config ;
- debug "Planning configuration for offline agile VMs = %s"
- (Binpack.string_of_configuration
- (fun x ->
- Printf.sprintf "%s (%s)" (Ref.short_string_of x)
- (Db.Host.get_hostname ~__context ~self:x)
- )
- (fun x ->
- Printf.sprintf "%s (%s)" (Ref.short_string_of x)
- (Db.VM.get_name_label ~__context ~self:x)
- )
- config
- ) ;
+
+ let binpack_plan ~__context config vms =
+ debug "Binpack planning configuration = %s"
+ (Binpack.string_of_configuration
+ (fun x ->
+ Printf.sprintf "%s (%s)" (Ref.short_string_of x)
+ (Db.Host.get_name_label ~__context ~self:x)
+ )
+ (fun x ->
+ Printf.sprintf "%s (%s)" (Ref.short_string_of x)
+ (Db.VM.get_name_label ~__context ~self:x)
+ )
+ config
+ ) ;
+ debug "VMs to attempt to evacuate: [ %s ]"
+ (String.concat "; "
+ (vms
+ |> List.map (fun (self, _) ->
+ Printf.sprintf "%s (%s)" (Ref.short_string_of self)
+ (Db.VM.get_name_label ~__context ~self)
+ )
+ )
+ ) ;
+ let h = Binpack.choose_heuristic config in
+ h.Binpack.get_specific_plan config (List.map fst vms)
+ in
+
+ let binpack_after_plan_applied plan not_planned_vms =
+ match plan with
+ | [] ->
+ None
+ | plan -> (
+ debug "Binpack for the rest VMs" ;
+ let config_after_plan_applied = Binpack.apply_plan config plan in
+ let config_after_plan_applied =
+ {config_after_plan_applied with vms= not_planned_vms}
+ in
+ let b_plan =
+ binpack_plan ~__context config_after_plan_applied not_planned_vms
+ in
+ match List.length b_plan = List.length not_planned_vms with
+ | true ->
+ debug "Got final plan." ;
+ Some (plan @ b_plan)
+ | false ->
+ debug
+ "Failed to get final plan as failed to binpack for all the rest \
+ VMs." ;
+ None
+ )
+ in
+
+ match
+ (Pool_features.is_enabled ~__context Features.VM_groups, total_hosts)
+ with
+ | _, h when h < 3 ->
+ debug
+ "There are less than 2 available hosts to migrate VMs to, \
+ anti-affinity evacuation plan is not needed." ;
+ binpack_plan ~__context config vms
+ | false, _ ->
+ debug
+ "VM groups feature is disabled, ignore VM anti-affinity during host \
+ evacuation" ;
+ binpack_plan ~__context config vms
+ | true, _ ->
+ let anti_aff_vms, non_anti_aff_vms = vms |> vms_partition ~__context in
+ let spread_evenly_plan_pool_state =
+ init_spread_evenly_plan_pool_state ~__context anti_aff_vms hosts
+ in
+ let anti_aff_vms_increasing =
+ anti_aff_vms |> List.sort (fun (_, a, _) (_, b, _) -> compare a b)
+ in
+
+ let ( let* ) o f = match o with None -> f None | p -> p in
+ (let* _no_plan =
+ let plan =
+ compute_spread_evenly_plan ~__context spread_evenly_plan_pool_state
+ anti_aff_vms_increasing
+ in
+ binpack_after_plan_applied plan non_anti_aff_vms
+ in
+ let* _no_plan =
+ let plan, not_planned_vms =
+ compute_no_breach_plan ~__context
+ (init_no_breach_plan_pool_state spread_evenly_plan_pool_state)
+ anti_aff_vms_increasing
+ in
+ binpack_after_plan_applied plan (non_anti_aff_vms @ not_planned_vms)
+ in
+ binpack_plan ~__context config vms |> Option.some
+ )
+ |> Option.value ~default:[]
+
+(** Return a VM -> Host plan for the Host.evacuate code. We assume the VMs are all agile. The returned plan may
+ be incomplete if there was not enough memory. *)
+let compute_evacuation_plan ~__context total_hosts remaining_hosts
+ vms_and_snapshots =
debug "VMs to attempt to evacuate: [ %s ]"
(String.concat "; "
(List.map
@@ -239,8 +604,17 @@ let compute_evacuation_plan ~__context total_hosts remaining_hosts
vms_and_snapshots
)
) ;
- let h = Binpack.choose_heuristic config in
- h.Binpack.get_specific_plan config (List.map fst vms_and_snapshots)
+ let hosts =
+ List.map
+ (fun host -> (host, host_free_memory ~__context ~host))
+ remaining_hosts
+ in
+ let vms =
+ List.map
+ (fun (vm, snapshot) -> (vm, vm_memory ~__context snapshot))
+ vms_and_snapshots
+ in
+ compute_anti_aff_evac_plan ~__context total_hosts hosts vms
(** Passed to the planner to reason about other possible configurations, used to block operations which would
destroy the HA VM restart plan. *)
diff --git a/ocaml/xapi/xapi_ha_vm_failover.mli b/ocaml/xapi/xapi_ha_vm_failover.mli
index 89a4c3d20e5..20eb3b6b844 100644
--- a/ocaml/xapi/xapi_ha_vm_failover.mli
+++ b/ocaml/xapi/xapi_ha_vm_failover.mli
@@ -86,3 +86,51 @@ val assert_nfailures_change_preserves_ha_plan :
__context:Context.t -> int -> unit
val assert_new_vm_preserves_ha_plan : __context:Context.t -> API.ref_VM -> unit
+
+(* Below exposed only for ease of testing *)
+
+module VMGrpMap : Map.S with type key = API.ref_VM_group
+
+module HostKey : sig
+ type t = API.ref_host
+end
+
+module AntiAffEvacPlanHostPsq : Psq.S with type k = HostKey.t
+
+val compute_spread_evenly_plan :
+ __context:Context.t
+ -> AntiAffEvacPlanHostPsq.t VMGrpMap.t
+ -> (API.ref_VM * int64 * API.ref_VM_group) list
+ -> (API.ref_VM * API.ref_host) list
+
+val compute_no_breach_plan :
+ __context:Context.t
+ -> (AntiAffEvacPlanHostPsq.t * int) VMGrpMap.t
+ -> (API.ref_VM * int64 * API.ref_VM_group) list
+ -> (API.ref_VM * API.ref_host) list * (API.ref_VM * int64) list
+
+val compute_anti_aff_evac_plan :
+ __context:Context.t
+ -> int
+ -> (API.ref_host * int64) list
+ -> (API.ref_VM * int64) list
+ -> (API.ref_VM * API.ref_host) list
+
+val host_free_memory : __context:Context.t -> host:API.ref_host -> int64
+
+val vm_memory : __context:Context.t -> API.vM_t -> int64
+
+val vms_partition :
+ __context:Context.t
+ -> (API.ref_VM * 'a) list
+ -> (API.ref_VM * 'a * API.ref_VM_group) list * (API.ref_VM * 'a) list
+
+val init_spread_evenly_plan_pool_state :
+ __context:Context.t
+ -> ('a * 'b * API.ref_VM_group) list
+ -> (API.ref_host * int64) list
+ -> AntiAffEvacPlanHostPsq.t VMGrpMap.t
+
+val init_no_breach_plan_pool_state :
+ AntiAffEvacPlanHostPsq.t VMGrpMap.t
+ -> (AntiAffEvacPlanHostPsq.t * int) VMGrpMap.t
diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml
index 01b76be3d85..7e767dbd035 100644
--- a/ocaml/xapi/xapi_host.ml
+++ b/ocaml/xapi/xapi_host.ml
@@ -1109,7 +1109,7 @@ let destroy ~__context ~self =
Db.Host.destroy ~__context ~self ;
Create_misc.create_pool_cpuinfo ~__context ;
List.iter (fun vm -> Db.VM.destroy ~__context ~self:vm) my_control_domains ;
- Pool_features.update_pool_features ~__context
+ Pool_features_helpers.update_pool_features ~__context
let declare_dead ~__context ~host =
precheck_destroy_declare_dead ~__context ~self:host "declare_dead" ;
@@ -2025,7 +2025,7 @@ let copy_license_to_db ~__context ~host:_ ~features ~additional =
let set_license_params ~__context ~self ~value =
Db.Host.set_license_params ~__context ~self ~value ;
- Pool_features.update_pool_features ~__context
+ Pool_features_helpers.update_pool_features ~__context
let apply_edition_internal ~__context ~host ~edition ~additional =
(* Get localhost's current license state. *)
diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml
index 2d3a13304c7..ee08e18cf95 100644
--- a/ocaml/xapi/xapi_pool.ml
+++ b/ocaml/xapi/xapi_pool.ml
@@ -775,7 +775,9 @@ let pre_join_checks ~__context ~rpc ~session_id ~force =
list
|> List.to_seq
|> Seq.map (fun (_, record) ->
- (record.API.certificate_name, record.API.certificate_fingerprint)
+ ( record.API.certificate_name
+ , record.API.certificate_fingerprint_sha256
+ )
)
|> CertMap.of_seq
in
@@ -1992,7 +1994,7 @@ let eject ~__context ~host =
Create_misc.create_pool_cpuinfo ~__context ;
(* Update pool features, in case this host had a different license to the
* rest of the pool. *)
- Pool_features.update_pool_features ~__context
+ Pool_features_helpers.update_pool_features ~__context
| true, true ->
raise Cannot_eject_master
diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml
index 9e510baa4de..8a03aba27e1 100644
--- a/ocaml/xapi/xapi_vm.ml
+++ b/ocaml/xapi/xapi_vm.ml
@@ -1441,6 +1441,26 @@ let set_appliance ~__context ~self ~value =
(* Update the VM's allowed operations - this will update the new appliance's operations, if valid. *)
update_allowed_operations ~__context ~self
+let set_groups ~__context ~self ~value =
+ Pool_features.assert_enabled ~__context ~f:Features.VM_groups ;
+ if
+ Db.VM.get_is_control_domain ~__context ~self
+ || Db.VM.get_is_a_template ~__context ~self
+ || Db.VM.get_is_a_snapshot ~__context ~self
+ then
+ raise
+ (Api_errors.Server_error
+ ( Api_errors.operation_not_allowed
+ , [
+ "Control domains, templates, and snapshots cannot be added to VM \
+ groups."
+ ]
+ )
+ ) ;
+ if List.length value > 1 then
+ raise Api_errors.(Server_error (Api_errors.too_many_groups, [])) ;
+ Db.VM.set_groups ~__context ~self ~value
+
let import_convert ~__context ~_type ~username ~password ~sr ~remote_config =
let open Vpx in
let print_jobInstance (j : Vpx.jobInstance) =
diff --git a/ocaml/xapi/xapi_vm.mli b/ocaml/xapi/xapi_vm.mli
index 273d0d6f2ca..19a737755e0 100644
--- a/ocaml/xapi/xapi_vm.mli
+++ b/ocaml/xapi/xapi_vm.mli
@@ -372,6 +372,9 @@ val set_suspend_VDI :
val set_appliance :
__context:Context.t -> self:API.ref_VM -> value:API.ref_VM_appliance -> unit
+val set_groups :
+ __context:Context.t -> self:API.ref_VM -> value:API.ref_VM_group_set -> unit
+
val import_convert :
__context:Context.t
-> _type:string
diff --git a/ocaml/xapi/xapi_vm_group.ml b/ocaml/xapi/xapi_vm_group.ml
new file mode 100644
index 00000000000..f2a7497737b
--- /dev/null
+++ b/ocaml/xapi/xapi_vm_group.ml
@@ -0,0 +1,29 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module D = Debug.Make (struct let name = "xapi_vm_group" end)
+
+let create ~__context ~name_label ~name_description ~placement =
+ Pool_features.assert_enabled ~__context ~f:Features.VM_groups ;
+ let uuid = Uuidx.make () in
+ let ref = Ref.make () in
+ Db.VM_group.create ~__context ~ref ~uuid:(Uuidx.to_string uuid) ~name_label
+ ~name_description ~placement ;
+ ref
+
+let destroy ~__context ~self =
+ List.iter
+ (fun vm -> Db.VM.remove_groups ~__context ~self:vm ~value:self)
+ (Db.VM_group.get_VMs ~__context ~self) ;
+ Db.VM_group.destroy ~__context ~self
diff --git a/ocaml/xapi/xapi_vm_group.mli b/ocaml/xapi/xapi_vm_group.mli
new file mode 100644
index 00000000000..5ea43acc204
--- /dev/null
+++ b/ocaml/xapi/xapi_vm_group.mli
@@ -0,0 +1,22 @@
+(*
+ * Copyright (c) Cloud Software Group, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+val create :
+ __context:Context.t
+ -> name_label:string
+ -> name_description:string
+ -> placement:API.placement_policy
+ -> [`VM_group] Ref.t
+
+val destroy : __context:Context.t -> self:[`VM_group] Ref.t -> unit
diff --git a/ocaml/xapi/xapi_vm_group_helpers.ml b/ocaml/xapi/xapi_vm_group_helpers.ml
new file mode 100644
index 00000000000..87fc15b10b5
--- /dev/null
+++ b/ocaml/xapi/xapi_vm_group_helpers.ml
@@ -0,0 +1,213 @@
+(*
+ * Copyright (c) 2024 Cloud Software Group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module D = Debug.Make (struct let name = "xapi_vm_group_helpers" end)
+
+open D
+
+(* Check the breach state of a group.
+ When there are no VMs or only one VM in the group, it is not considered a
+ breach.
+ When there are two or more VMs and all of them are on the same host, it is
+ considered a breach, and the specific host is returned.
+*)
+let check_breach_on_vm_anti_affinity_rules ~__context ~group =
+ Db.VM_group.get_VMs ~__context ~self:group
+ |> List.filter_map (fun vm ->
+ let vm_rec = Db.VM.get_record ~__context ~self:vm in
+ match (vm_rec.API.vM_power_state, vm_rec.API.vM_resident_on) with
+ | `Running, h when h <> Ref.null ->
+ Some h
+ | _ ->
+ None
+ )
+ |> function
+ | [] | [_] ->
+ None
+ | h :: remaining ->
+ if List.exists (fun h' -> h' <> h) remaining then
+ None
+ else
+ Some h
+
+let report_anti_affinity_alert ~__context ~group ~host =
+ let group_uuid = Db.VM_group.get_uuid ~__context ~self:group in
+ let host_uuid = Db.Host.get_uuid ~__context ~self:host in
+ let body =
+ String.concat ""
+ [
+ "Breach on VM anti-affinity rules "
+ ; group_uuid
+ ; " "
+ ; host_uuid
+ ; " "
+ ]
+ in
+ let obj_uuid =
+ Db.Pool.get_uuid ~__context ~self:(Helpers.get_pool ~__context)
+ in
+ Xapi_alert.add
+ ~msg:Api_messages.all_running_vms_in_anti_affinity_grp_on_single_host
+ ~cls:`Pool ~obj_uuid ~body
+
+let get_anti_affinity_alerts ~__context =
+ Helpers.call_api_functions ~__context (fun rpc session_id ->
+ Client.Client.Message.get_all_records ~rpc ~session_id
+ )
+ |> List.filter (fun (_, record) ->
+ record.API.message_name
+ = fst Api_messages.all_running_vms_in_anti_affinity_grp_on_single_host
+ )
+
+let alert_matched ~__context ~label_name ~id alert =
+ let alert_rec = snd alert in
+ match Xml.parse_string alert_rec.API.message_body with
+ | Xml.Element ("body", _, children) -> (
+ let filtered =
+ List.filter_map
+ (function
+ | Xml.Element (name, _, [Xml.PCData v]) when name = label_name ->
+ Some v
+ | _ ->
+ None
+ )
+ children
+ in
+ match filtered with [uuid] when uuid = id -> true | _ -> false
+ )
+ | _ ->
+ let msg = "Invalid message body of VM group alert" in
+ error "%s" msg ;
+ raise Api_errors.(Server_error (internal_error, [msg]))
+ | exception e ->
+ let msg = Printf.sprintf "%s" (ExnHelper.string_of_exn e) in
+ error "%s" msg ;
+ raise Api_errors.(Server_error (internal_error, [msg]))
+
+let filter_alerts_with_group ~__context ~group ~alerts =
+ let group_uuid = Db.VM_group.get_uuid ~__context ~self:group in
+ List.filter
+ (alert_matched ~__context ~label_name:"VM_group" ~id:group_uuid)
+ alerts
+
+let filter_alerts_with_host ~__context ~host ~alerts =
+ let host_uuid = Db.Host.get_uuid ~__context ~self:host in
+ List.filter (alert_matched ~__context ~label_name:"host" ~id:host_uuid) alerts
+
+(* If it is a breach and no alerts exist, generate one,
+ If it is not a breach and alerts exist, dismiss the existing alert *)
+let update_vm_anti_affinity_alert_for_group ~__context ~group ~alerts =
+ let breach_on_host =
+ check_breach_on_vm_anti_affinity_rules ~__context ~group
+ in
+ debug "[Anti-affinity] existing alerts of group (UUID: %s) is: %d"
+ (Db.VM_group.get_uuid ~__context ~self:group)
+ (List.length alerts) ;
+ match (alerts, breach_on_host) with
+ | [], Some host ->
+ report_anti_affinity_alert ~__context ~group ~host
+ | alerts, None ->
+ List.iter
+ (fun (ref, _) ->
+ Helpers.call_api_functions ~__context (fun rpc session_id ->
+ Client.Client.Message.destroy ~rpc ~session_id ~self:ref
+ )
+ )
+ alerts
+ | alerts, Some host when filter_alerts_with_host ~__context ~host ~alerts = []
+ ->
+ List.iter
+ (fun (ref, _) ->
+ Helpers.call_api_functions ~__context (fun rpc session_id ->
+ Client.Client.Message.destroy ~rpc ~session_id ~self:ref
+ )
+ )
+ alerts ;
+ report_anti_affinity_alert ~__context ~group ~host
+ | _, _ ->
+ ()
+
+let maybe_update_vm_anti_affinity_alert_for_vm ~__context ~vm =
+ if Pool_features.is_enabled ~__context Features.VM_groups then
+ try
+ Db.VM.get_groups ~__context ~self:vm
+ |> List.filter (fun g ->
+ Db.VM_group.get_placement ~__context ~self:g = `anti_affinity
+ )
+ |> function
+ | [] ->
+ ()
+ | group :: _ ->
+ let alerts = get_anti_affinity_alerts ~__context in
+ let alerts_of_group =
+ filter_alerts_with_group ~__context ~group ~alerts
+ in
+ update_vm_anti_affinity_alert_for_group ~__context ~group
+ ~alerts:alerts_of_group
+ with e -> error "%s" (Printexc.to_string e)
+ else
+ debug "VM group feature is disabled, alert will not be updated"
+
+let remove_vm_anti_affinity_alert_for_group ~__context ~group ~alerts =
+ debug "[Anti-affinity] remove alert for group:%s"
+ (Db.VM_group.get_uuid ~__context ~self:group) ;
+ List.iter
+ (fun (ref, _) ->
+ Helpers.call_api_functions ~__context (fun rpc session_id ->
+ Client.Client.Message.destroy ~rpc ~session_id ~self:ref
+ )
+ )
+ alerts
+
+let update_alert ~__context ~groups ~action =
+ try
+ let alerts = get_anti_affinity_alerts ~__context in
+ groups
+ |> List.filter (fun g ->
+ Db.VM_group.get_placement ~__context ~self:g = `anti_affinity
+ )
+ |> List.iter (fun group ->
+ let alerts_of_group =
+ filter_alerts_with_group ~__context ~group ~alerts
+ in
+ action ~__context ~group ~alerts:alerts_of_group
+ )
+ with e -> error "%s" (Printexc.to_string e)
+
+let update_vm_anti_affinity_alert ~__context ~groups =
+ if Pool_features.is_enabled ~__context Features.VM_groups then
+ update_alert ~__context ~groups
+ ~action:update_vm_anti_affinity_alert_for_group
+ else
+ debug "VM group feature is disabled, alert will not be updated"
+
+let remove_vm_anti_affinity_alert ~__context ~groups =
+ update_alert ~__context ~groups
+ ~action:remove_vm_anti_affinity_alert_for_group
+
+let maybe_update_alerts_on_feature_change ~__context ~old_restrictions
+ ~new_restrictions =
+ try
+ let is_enabled restrictions =
+ List.mem Features.VM_groups (Features.of_assoc_list restrictions)
+ in
+ let groups = Db.VM_group.get_all ~__context in
+ match (is_enabled old_restrictions, is_enabled new_restrictions) with
+ | false, true ->
+ update_vm_anti_affinity_alert ~__context ~groups
+ | true, false ->
+ remove_vm_anti_affinity_alert ~__context ~groups
+ | _, _ ->
+ ()
+ with e -> error "%s" (Printexc.to_string e)
diff --git a/ocaml/xapi/xapi_vm_group_helpers.mli b/ocaml/xapi/xapi_vm_group_helpers.mli
new file mode 100644
index 00000000000..e2800ee69db
--- /dev/null
+++ b/ocaml/xapi/xapi_vm_group_helpers.mli
@@ -0,0 +1,44 @@
+(*
+ * Copyright (c) 2024 Cloud Software Group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+val maybe_update_vm_anti_affinity_alert_for_vm :
+ __context:Context.t -> vm:[`VM] API.Ref.t -> unit
+(** Updates the VM anti-affinity alert for a given VM. *)
+
+val remove_vm_anti_affinity_alert :
+ __context:Context.t -> groups:[`VM_group] API.Ref.t list -> unit
+(** removes VM anti-affinity alert with given groups.*)
+
+val update_vm_anti_affinity_alert :
+ __context:Context.t -> groups:[`VM_group] API.Ref.t list -> unit
+(** updates VM anti-affinity alert with given groups.*)
+
+val maybe_update_alerts_on_feature_change :
+ __context:Context.t
+ -> old_restrictions:(string * string) list
+ -> new_restrictions:(string * string) list
+ -> unit
+(** Updates the VM anti-affinity alert only when Features.VM_groups changes.
+
+ @param __context The context information.
+ @param old_restrictions The old feature restrictions represented as an association list.
+ Each entry in the list contains a feature identifier and its corresponding restriction status.
+ @param new_restrictions The new feature restrictions represented as an association list.
+ Each entry in the list contains a feature identifier and its corresponding restriction status.
+ Example:
+ [
+ ("restrict_vlan", "true");
+ ("restrict_vm_group", "false")
+ ]
+*)
diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml
index d8b9855686e..989686ca2dc 100644
--- a/ocaml/xapi/xapi_vm_helpers.ml
+++ b/ocaml/xapi/xapi_vm_helpers.ml
@@ -35,6 +35,12 @@ module SRSet = Set.Make (struct
let compare = Stdlib.compare
end)
+module HostMap = Map.Make (struct
+ type t = [`host] Ref.t
+
+ let compare = Ref.compare
+end)
+
let compute_memory_overhead ~__context ~vm =
let vm_record = Db.VM.get_record ~__context ~self:vm in
Memory_check.vm_compute_memory_overhead ~vm_record
@@ -114,7 +120,9 @@ let set_is_a_template ~__context ~self ~value =
)
|> List.rev
|> List.iter (fun p -> Db.PVS_proxy.destroy ~__context ~self:p) ;
- (* delete the vm metrics associated with the vm if it exists, when we templat'ize it *)
+ (* Remove from any VM groups when we templatize it *)
+ Db.VM.set_groups ~__context ~self ~value:[] ;
+ (* Delete the vm metrics associated with the vm if it exists, when we templatize it *)
finally
(fun () -> Db.VM_metrics.destroy ~__context ~self:m)
(fun () -> Db.VM.set_metrics ~__context ~self ~value:Ref.null)
@@ -904,6 +912,26 @@ let vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check host =
&& host_evacuate_in_progress
with _ -> false
+let vm_has_anti_affinity ~__context ~vm =
+ if Pool_features.is_enabled ~__context Features.VM_groups then
+ List.find_opt
+ (fun g -> Db.VM_group.get_placement ~__context ~self:g = `anti_affinity)
+ (Db.VM.get_groups ~__context ~self:vm)
+ |> Option.map (fun group ->
+ debug
+ "The VM (uuid %s) is associated with an anti-affinity group \
+ (uuid: %s, name: %s)"
+ (Db.VM.get_uuid ~__context ~self:vm)
+ (Db.VM_group.get_uuid ~__context ~self:group)
+ (Db.VM_group.get_name_label ~__context ~self:group) ;
+ `AntiAffinity group
+ )
+ else (
+ debug
+ "VM group feature is disabled, ignore VM anti-affinity during VM start" ;
+ None
+ )
+
let vm_has_vgpu ~__context ~vm =
match Db.VM.get_VGPUs ~__context ~self:vm with
| [] ->
@@ -923,7 +951,11 @@ let vm_has_sriov ~__context ~vm =
let ( >>= ) opt f = match opt with Some _ as v -> v | None -> f
let get_group_key ~__context ~vm =
- match None >>= vm_has_vgpu ~__context ~vm >>= vm_has_sriov ~__context ~vm with
+ match
+ vm_has_anti_affinity ~__context ~vm
+ >>= vm_has_vgpu ~__context ~vm
+ >>= vm_has_sriov ~__context ~vm
+ with
| Some x ->
x
| None ->
@@ -969,12 +1001,112 @@ let rank_hosts_by_best_vgpu ~__context vgpu visible_hosts =
0L
)
hosts
- |> List.map (fun g -> List.map (fun (h, _) -> h) g)
+ |> List.map (fun g -> List.map fst g)
+
+let host_to_vm_count_map ~__context group =
+ let host_of_vm vm =
+ let vm_rec = Db.VM.get_record ~__context ~self:vm in
+ (* 1. When a VM starts migrating, its 'scheduled_to_be_resident_on' will be set,
+ while its 'resident_on' is not cleared. In this case,
+ 'scheduled_to_be_resident_on' should be treated as its running host.
+ 2. For a paused VM, its 'resident_on' has a value, but it is not counted
+ when computing the number of VMs. *)
+ match
+ ( vm_rec.API.vM_scheduled_to_be_resident_on
+ , vm_rec.API.vM_resident_on
+ , vm_rec.API.vM_power_state
+ )
+ with
+ | sh, _, _ when sh <> Ref.null ->
+ Some sh
+ | _, h, `Running when h <> Ref.null ->
+ Some h
+ | _ ->
+ None
+ in
+ Db.VM_group.get_VMs ~__context ~self:group
+ |> List.fold_left
+ (fun m vm ->
+ match host_of_vm vm with
+ | Some h ->
+ HostMap.update h
+ (fun c -> Option.(value ~default:0 c |> succ |> some))
+ m
+ | None ->
+ m
+ )
+ HostMap.empty
+
+let rank_hosts_by_vm_cnt_in_group ~__context group hosts =
+ let host_map = host_to_vm_count_map ~__context group in
+ Helpers.group_by ~ordering:`ascending
+ (fun h -> HostMap.find_opt h host_map |> Option.value ~default:0)
+ hosts
+
+let get_affinity_host ~__context ~vm =
+ match Db.VM.get_affinity ~__context ~self:vm with
+ | ref when Db.is_valid_ref __context ref ->
+ Some ref
+ | _ ->
+ None
+
+(* Group all hosts to 2 parts:
+ 1. A list of affinity host (only one host).
+ 2. A list of lists, each list contains hosts with the same number of
+ running VM in that anti-affinity group.
+ These lists are sorted by VM's count.
+ Combine these lists into one list. The list is like below:
+ [ [host1] (affinity host)
+ , [host2, host3] (no VM running)
+ , [host4, host5] (one VM running)
+ , [host6, host7] (more VMs running)
+ , ...
+ ]
+*)
+let rank_hosts_by_placement ~__context ~vm ~group =
+ let hosts = Db.Host.get_all ~__context in
+ let affinity_host = get_affinity_host ~__context ~vm in
+ let hosts_without_affinity =
+ Option.fold ~none:hosts
+ ~some:(fun host -> List.filter (( <> ) host) hosts)
+ affinity_host
+ in
+ let sorted_hosts =
+ hosts_without_affinity
+ |> rank_hosts_by_vm_cnt_in_group ~__context group
+ |> List.(map (map fst))
+ in
+ match affinity_host with
+ | Some host ->
+ [host] :: sorted_hosts
+ | None ->
+ sorted_hosts
+
+let rec select_host_from_ranked_lists ~vm ~host_selector ~ranked_host_lists =
+ match ranked_host_lists with
+ | [] ->
+ raise (Api_errors.Server_error (Api_errors.no_hosts_available, []))
+ | hosts :: less_optimal_groups_of_hosts -> (
+ let hosts_str = String.concat ";" (List.map Ref.string_of hosts) in
+ debug
+ "Attempting to select host for VM (%s) in a group of equally optimal \
+ hosts [ %s ]"
+ (Ref.string_of vm) hosts_str ;
+ try host_selector hosts
+ with _ ->
+ info
+ "Failed to select host for VM (%s) in any of [ %s ], continue to \
+ select from less optimal hosts"
+ (Ref.string_of vm) hosts_str ;
+ select_host_from_ranked_lists ~vm ~host_selector
+ ~ranked_host_lists:less_optimal_groups_of_hosts
+ )
(* Selects a single host from the set of all hosts on which the given [vm] can boot.
Raises [Api_errors.no_hosts_available] if no such host exists.
- 1.Take Vgpu or Network SR-IOV as a group_key for group all hosts into host list list
- 2.helper function's order determine the priority of resources,now vgpu has higher priority than Network SR-IOV
+ 1. Take anti-affinity, VGPU, or Network SR-IOV as a group_key to group all hosts into a list of host lists
+ 2. The helper functions' order determines the priority of resources; anti-affinity has the highest priority,
+ VGPU is second, and Network SR-IOV is the lowest
3.If no key found in VM,then host_lists will be [all_hosts] *)
let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot =
let validate_host =
@@ -986,6 +1118,8 @@ let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot =
match group_key with
| `Other ->
[all_hosts]
+ | `AntiAffinity group ->
+ rank_hosts_by_placement ~__context ~vm ~group
| `VGPU vgpu ->
let can_host_vm ~__context host vm =
try
@@ -1000,7 +1134,7 @@ let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot =
let host_group =
Xapi_network_sriov_helpers.group_hosts_by_best_sriov ~__context
~network
- |> List.map (fun g -> List.map (fun (h, _) -> h) g)
+ |> List.map (fun g -> List.map fst g)
in
if host_group <> [] then
host_group
@@ -1012,22 +1146,12 @@ let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot =
)
)
in
- let rec select_host_from = function
- | [] ->
- raise (Api_errors.Server_error (Api_errors.no_hosts_available, []))
- | hosts :: less_optimal_groups_of_hosts -> (
- debug
- "Attempting to start VM (%s) on one of equally optimal hosts [ %s ]"
- (Ref.string_of vm)
- (String.concat ";" (List.map Ref.string_of hosts)) ;
- try Xapi_vm_placement.select_host __context vm validate_host hosts
- with _ ->
- info "Failed to start VM (%s) on any of [ %s ]" (Ref.string_of vm)
- (String.concat ";" (List.map Ref.string_of hosts)) ;
- select_host_from less_optimal_groups_of_hosts
- )
+ let host_selector =
+ Xapi_vm_placement.select_host __context vm validate_host
in
- try select_host_from host_lists
+ try
+ select_host_from_ranked_lists ~vm ~host_selector
+ ~ranked_host_lists:host_lists
with
| Api_errors.Server_error (x, []) when x = Api_errors.no_hosts_available ->
debug
diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml
index ccee66500cd..2f6130641df 100644
--- a/ocaml/xapi/xapi_vm_lifecycle.ml
+++ b/ocaml/xapi/xapi_vm_lifecycle.ml
@@ -850,6 +850,7 @@ let remove_pending_guidance ~__context ~self ~value =
2. Called on update VM when the power state changes *)
let force_state_reset_keep_current_operations ~__context ~self ~value:state =
(* First update the power_state. Some operations below indirectly rely on this. *)
+ let old_state = Db.VM.get_power_state ~__context ~self in
Db.VM.set_power_state ~__context ~self ~value:state ;
if state = `Suspended then
remove_pending_guidance ~__context ~self ~value:`restart_device_model ;
@@ -941,6 +942,9 @@ let force_state_reset_keep_current_operations ~__context ~self ~value:state =
(Db.PCI.get_all ~__context)
) ;
update_allowed_operations ~__context ~self ;
+ if old_state <> state && (old_state = `Running || state = `Running) then
+ Xapi_vm_group_helpers.maybe_update_vm_anti_affinity_alert_for_vm ~__context
+ ~vm:self ;
if state = `Halted then (* archive the rrd for this vm *)
let vm_uuid = Db.VM.get_uuid ~__context ~self in
let master_address = Pool_role.get_master_address_opt () in
diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml
index 8fb445aace1..e57ef22fbad 100644
--- a/ocaml/xapi/xapi_vm_migrate.ml
+++ b/ocaml/xapi/xapi_vm_migrate.ml
@@ -500,7 +500,9 @@ let pool_migrate_complete ~__context ~vm ~host:_ =
Xapi_xenops.add_caches id ;
Xapi_xenops.refresh_vm ~__context ~self:vm ;
Monitor_dbcalls_cache.clear_cache_for_vm ~vm_uuid:id
- )
+ ) ;
+ Xapi_vm_group_helpers.maybe_update_vm_anti_affinity_alert_for_vm ~__context
+ ~vm
type mirror_record = {
mr_mirrored: bool
diff --git a/ocaml/xapi/xapi_vm_snapshot.ml b/ocaml/xapi/xapi_vm_snapshot.ml
index 747fd68deb3..49f745a8845 100644
--- a/ocaml/xapi/xapi_vm_snapshot.ml
+++ b/ocaml/xapi/xapi_vm_snapshot.ml
@@ -385,6 +385,7 @@ let do_not_copy =
"snapshots"
; "tags"
; "affinity"
+ ; "groups"
; (* Current fields should remain to get destroyed during revert process *)
"consoles"
; "VBDs"
diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml
index 33bcbb7a958..a186b2e8b76 100644
--- a/ocaml/xapi/xapi_xenops.ml
+++ b/ocaml/xapi/xapi_xenops.ml
@@ -3402,7 +3402,7 @@ let transform_xenops_exn ~__context ~vm queue_name f =
Backtrace.reraise e e'
in
let internal fmt =
- Printf.kprintf (fun x -> reraise Api_errors.internal_error [x]) fmt
+ Printf.ksprintf (fun x -> reraise Api_errors.internal_error [x]) fmt
in
match e with
| Xenopsd_error e' -> (
diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml
index fede0f4b0d7..80691b0ab9d 100644
--- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml
+++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml
@@ -580,7 +580,7 @@ let bytes_per_mem_vm = 1024
let mem_vm_writer_pages = ((max_supported_vms * bytes_per_mem_vm) + 4095) / 4096
-let res_error fmt = Printf.kprintf Result.error fmt
+let res_error fmt = Printf.ksprintf Result.error fmt
let ok x = Result.ok x
diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion
index ce9d5ae5f11..e697d777e37 100644
--- a/ocaml/xe-cli/bash-completion
+++ b/ocaml/xe-cli/bash-completion
@@ -114,6 +114,7 @@ _xe()
pvs-site-*|\
sdn-controller-*|\
network-sriov-*|\
+ vm-group-*|\
cluster-host-*)
# Chop off at the second '-' and append 'list'
cmd="$(echo ${OLDSTYLE_WORDS[1]} | cut -d- -f1-2)-list";;
@@ -387,6 +388,12 @@ _xe()
return 0
;;
+ placement) # for vm-group-create
+ IFS=$'\n,'
+ set_completions 'normal,anti-affinity' "$value"
+ return 0
+ ;;
+
*)
snd=`echo ${param} | gawk -F- '{print $NF}'`
fst=`echo ${param} | gawk -F- '{printf "%s", $1; for (i=2; i output_string c s | None -> ()
in
- Printf.kprintf printer fmt
+ Printf.ksprintf printer fmt
(* usage message *)
exception Usage
diff --git a/ocaml/xenopsd/lib/xenops_server.ml b/ocaml/xenopsd/lib/xenops_server.ml
index 30fc7ea16ac..c7fc910ea33 100644
--- a/ocaml/xenopsd/lib/xenops_server.ml
+++ b/ocaml/xenopsd/lib/xenops_server.ml
@@ -24,7 +24,7 @@ module D = Debug.Make (struct let name = "xenops_server" end)
open D
let internal_error fmt =
- Printf.kprintf
+ Printf.ksprintf
(fun str ->
error "%s" str ;
raise (Xenopsd_error (Internal_error str))
diff --git a/ocaml/xenopsd/pvs/pvs_proxy_setup.ml b/ocaml/xenopsd/pvs/pvs_proxy_setup.ml
index 8e73cc91696..f7f6f70cb87 100644
--- a/ocaml/xenopsd/pvs/pvs_proxy_setup.ml
+++ b/ocaml/xenopsd/pvs/pvs_proxy_setup.ml
@@ -30,7 +30,7 @@ end)
module XS = Ezxenstore_core.Xenstore
let error fmt =
- Printf.kprintf
+ Printf.ksprintf
(fun msg ->
D.error "%s" msg ;
Result.error (`Msg msg)
diff --git a/ocaml/xenopsd/xc/device.ml b/ocaml/xenopsd/xc/device.ml
index a9dabdd9159..6d47a2489ef 100644
--- a/ocaml/xenopsd/xc/device.ml
+++ b/ocaml/xenopsd/xc/device.ml
@@ -38,7 +38,7 @@ let finally = Xapi_stdext_pervasives.Pervasiveext.finally
let with_lock = Xapi_stdext_threads.Threadext.Mutex.execute
let internal_error fmt =
- Printf.kprintf
+ Printf.ksprintf
(fun str ->
error "%s" str ;
raise (Xenopsd_error (Internal_error str))
@@ -3628,7 +3628,7 @@ module Dm = struct
Q.Dm.pci_assign_guest ~xs ~index ~host
let ioemu_failed emu fmt =
- Printf.kprintf (fun msg -> raise (Ioemu_failed (emu, msg))) fmt
+ Printf.ksprintf (fun msg -> raise (Ioemu_failed (emu, msg))) fmt
let wait_for_vgpu_state states ~timeout ~xs ~domid ~task vgpus =
let open Xenops_interface.Vgpu in
diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml
index f78e7179e6a..d822a1a41cc 100644
--- a/ocaml/xenopsd/xc/domain.ml
+++ b/ocaml/xenopsd/xc/domain.ml
@@ -1529,7 +1529,7 @@ let restore_common (task : Xenops_task.task_handle) ~xc ~xs
in
(th, ch)
in
- let receive_thread_status threads_and_channels =
+ let[@inline never] receive_thread_status threads_and_channels =
(* Receive the status from all reader threads and let them exit.
This happens in two steps to make sure that we are unblocking and
closing all threads also in case of errors. *)
@@ -1549,9 +1549,7 @@ let restore_common (task : Xenops_task.task_handle) ~xc ~xs
(* Handle results returned by emu-manager *)
let emu_manager_results = handle_results () in
(* Wait for reader threads to complete *)
- let[@inlined never] thread_status =
- receive_thread_status threads_and_channels
- in
+ let thread_status = receive_thread_status threads_and_channels in
(* Chain all together, and we are done! *)
let res =
emu_manager_results >>= fun result ->
diff --git a/ocaml/xenopsd/xc/xenops_server_xen.ml b/ocaml/xenopsd/xc/xenops_server_xen.ml
index c94bbd16b3e..a3317194f24 100644
--- a/ocaml/xenopsd/xc/xenops_server_xen.ml
+++ b/ocaml/xenopsd/xc/xenops_server_xen.ml
@@ -31,7 +31,7 @@ let finally = Xapi_stdext_pervasives.Pervasiveext.finally
let with_lock = Xapi_stdext_threads.Threadext.Mutex.execute
let internal_error fmt =
- Printf.kprintf
+ Printf.ksprintf
(fun str ->
error "%s" str ;
raise (Xenopsd_error (Internal_error str))
diff --git a/xapi-stdext-date.opam b/xapi-stdext-date.opam
index 10658f8b54e..ee8aa096ab2 100644
--- a/xapi-stdext-date.opam
+++ b/xapi-stdext-date.opam
@@ -2,16 +2,13 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Dates"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
depends: [
"dune" {>= "3.0"}
- "ocaml" {>= "4.12"}
- "alcotest" {with-test}
- "astring"
- "base-unix"
+ "clock" {= version}
"ptime"
"odoc" {with-doc}
]
diff --git a/xapi-stdext-encodings.opam b/xapi-stdext-encodings.opam
index 51ef29fe35f..c0f8c27c5e7 100644
--- a/xapi-stdext-encodings.opam
+++ b/xapi-stdext-encodings.opam
@@ -2,7 +2,7 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Encodings"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
diff --git a/xapi-stdext-pervasives.opam b/xapi-stdext-pervasives.opam
index 3dc2d169718..83f4f2da1da 100644
--- a/xapi-stdext-pervasives.opam
+++ b/xapi-stdext-pervasives.opam
@@ -2,7 +2,7 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Pervasives"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
diff --git a/xapi-stdext-threads.opam b/xapi-stdext-threads.opam
index 714a2e01575..de9699fe2e3 100644
--- a/xapi-stdext-threads.opam
+++ b/xapi-stdext-threads.opam
@@ -2,7 +2,7 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Threads"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
diff --git a/xapi-stdext-unix.opam b/xapi-stdext-unix.opam
index 8a7fc149f44..a20dfbb34e0 100644
--- a/xapi-stdext-unix.opam
+++ b/xapi-stdext-unix.opam
@@ -2,7 +2,7 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Unix"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
diff --git a/xapi-stdext-zerocheck.opam b/xapi-stdext-zerocheck.opam
index 8f070a416f3..fce24fb209d 100644
--- a/xapi-stdext-zerocheck.opam
+++ b/xapi-stdext-zerocheck.opam
@@ -2,7 +2,7 @@
opam-version: "2.0"
synopsis: "Xapi's standard library extension, Zerocheck"
maintainer: ["Xapi project maintainers"]
-authors: ["xen-api@lists.xen.org"]
+authors: ["Jonathan Ludlam"]
license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception"
homepage: "https://xapi-project.github.io/"
bug-reports: "https://github.com/xapi-project/xen-api/issues"
diff --git a/xapi.opam b/xapi.opam
index e414d694b2c..387ba542fe6 100644
--- a/xapi.opam
+++ b/xapi.opam
@@ -38,6 +38,7 @@ depends: [
"ppx_deriving_rpc"
"ppx_sexp_conv"
"ppx_deriving"
+ "psq"
"rpclib"
"rrdd-plugin"
"rresult"
diff --git a/xapi.opam.template b/xapi.opam.template
index dc48554787e..49f3902f66a 100644
--- a/xapi.opam.template
+++ b/xapi.opam.template
@@ -36,6 +36,7 @@ depends: [
"ppx_deriving_rpc"
"ppx_sexp_conv"
"ppx_deriving"
+ "psq"
"rpclib"
"rrdd-plugin"
"rresult"