diff --git a/DC-HA-sbd-changing-configuration b/DC-HA-sbd-changing-configuration
index f02766fe8..179ded42c 100644
--- a/DC-HA-sbd-changing-configuration
+++ b/DC-HA-sbd-changing-configuration
@@ -4,7 +4,7 @@ IMG_SRC_DIR="images"
## Profiling
PROFOS="sleha"
-PROFCONDITION="16.0"
+PROFCONDITION="16.0;sbd-diskless;sbd-diskbased"
# Use the STRUCTID if there are several structures in one assembly file
#STRUCTID="STRUCTURE-ID"
#PROFARCH="x86_64;zseries;power;aarch64"
diff --git a/DC-HA-sbd-configuring-diskless b/DC-HA-sbd-configuring-diskless
new file mode 100644
index 000000000..8ae822429
--- /dev/null
+++ b/DC-HA-sbd-configuring-diskless
@@ -0,0 +1,15 @@
+MAIN="ha-sbd-configuring.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+
+## Profiling
+PROFOS="sleha"
+PROFCONDITION="16.0;sbd-diskless"
+# Use the STRUCTID if there are several structures in one assembly file
+STRUCTID="ha-sbd-configuring-diskless"
+#PROFARCH="x86_64;zseries;power;aarch64"
+
+DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
diff --git a/DC-Micro-snapper-basic-concepts b/DC-Micro-snapper-basic-concepts
index 7bb9c0f63..1b8661586 100644
--- a/DC-Micro-snapper-basic-concepts
+++ b/DC-Micro-snapper-basic-concepts
@@ -12,4 +12,4 @@ PROFOS="slmicro"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-# XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/DC-SLES-NVIDIA-jetson b/DC-SAP-SELinux
similarity index 55%
rename from DC-SLES-NVIDIA-jetson
rename to DC-SAP-SELinux
index eebdbf39e..2a677ba7d 100644
--- a/DC-SLES-NVIDIA-jetson
+++ b/DC-SAP-SELinux
@@ -1,16 +1,15 @@
# This file originates from the project https://github.com/openSUSE/doc-kit
# This file can be edited downstream.
-MAIN="NVIDIA-Jetson.asm.xml"
+MAIN="SELinux.asm.xml"
+# Point to the ID of your assembly
SRC_DIR="articles"
IMG_SRC_DIR="images"
-PROFCONDITION="suse-product;jetson"
+PROFOS="sles4sap"
+PROFCONDITION="16.0"
#PROFCONDITION="suse-product;beta"
#PROFCONDITION="community-project"
-PROFOS="sles"
-
-STYLEASSEMBLY="/usr/share/xml/docbook/stylesheet/nwalsh5/current/assembly/assemble.xsl"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
-FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
\ No newline at end of file
diff --git a/DC-SAP-intro-sap-ansible-automation b/DC-SAP-intro-sap-ansible-automation
new file mode 100644
index 000000000..8cb1be8ff
--- /dev/null
+++ b/DC-SAP-intro-sap-ansible-automation
@@ -0,0 +1,19 @@
+# This file originates from the project https://github.com/openSUSE/doc-kit
+# This file can be edited downstream.
+
+## Basics
+MAIN="intro-sap-ansible-automation.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+
+## Profiling
+PROFOS="sles4sap"
+PROFCONDITION="16.0"
+#STRUCTID="sles-cockpit"
+#PROFARCH="x86_64;zseries;power;aarch64"
+
+DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
+
+## stylesheet location
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2021-ns"
\ No newline at end of file
diff --git a/DC-SAP-snapper-basics b/DC-SAP-snapper-basics
index 004c3cbd9..7828581e0 100644
--- a/DC-SAP-snapper-basics
+++ b/DC-SAP-snapper-basics
@@ -11,4 +11,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-# XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/DC-SAP-systemd-management b/DC-SAP-systemd-management
new file mode 100644
index 000000000..8b73a9c32
--- /dev/null
+++ b/DC-SAP-systemd-management
@@ -0,0 +1,17 @@
+## ----------------------------
+## Doc Config File for the DB Assembly test
+## ----------------------------
+##
+## Basics
+MAIN="systemd-management.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+## Profiling
+PROFOS="sles4sap"
+#PROFARCH="x86_64;zseries;power;aarch64"
+
+## stylesheet location
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2021-ns"
+
+DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
\ No newline at end of file
diff --git a/DC-SAP-systemd-setting-up-service b/DC-SAP-systemd-setting-up-service
new file mode 100644
index 000000000..fc1033bbb
--- /dev/null
+++ b/DC-SAP-systemd-setting-up-service
@@ -0,0 +1,12 @@
+MAIN="systemd-setting-up-service.asm.xml"
+#ROOTID="setting-up-systemd"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+## Profiling
+PROFOS="sles4sap"
+PROFCONDITION="suse-product"
+#STRUCTID="STRUCTURE-ID"
+#PROFARCH="x86_64;zseries;power;aarch64"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
\ No newline at end of file
diff --git a/DC-Micro-NVIDIA-Jetson b/DC-SAP-vm-host-network-multihome-setup
similarity index 52%
rename from DC-Micro-NVIDIA-Jetson
rename to DC-SAP-vm-host-network-multihome-setup
index 092ebf395..4b09402f4 100644
--- a/DC-Micro-NVIDIA-Jetson
+++ b/DC-SAP-vm-host-network-multihome-setup
@@ -1,16 +1,19 @@
# This file originates from the project https://github.com/openSUSE/doc-kit
# This file can be edited downstream.
-MAIN="NVIDIA-Jetson.asm.xml"
+## Basics
+MAIN="SLES-vm-host-network-multihome-setup.asm.xml"
SRC_DIR="articles"
IMG_SRC_DIR="images"
-PROFCONDITION="suse-product;jetson"
-#PROFCONDITION="suse-product;beta"
-#PROFCONDITION="community-project"
-PROFOS="slmicro"
+## Profiling
+PROFOS="sles4sap"
+PROFCONDITION="16.0"
+#STRUCTID="sles-multihome"
+#PROFARCH="x86_64;zseries;power;aarch64"
-STYLEASSEMBLY="/usr/share/xml/docbook/stylesheet/nwalsh5/current/assembly/assemble.xsl"
+DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
+## stylesheet location
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
diff --git a/DC-SLES-snapper-basics b/DC-SLES-snapper-basics
index 8ca63541c..1100bf19d 100644
--- a/DC-SLES-snapper-basics
+++ b/DC-SLES-snapper-basics
@@ -11,4 +11,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-# XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/DC-SLES-systemd-management b/DC-SLES-systemd-management
new file mode 100644
index 000000000..dccb0a062
--- /dev/null
+++ b/DC-SLES-systemd-management
@@ -0,0 +1,17 @@
+## ----------------------------
+## Doc Config File for the DB Assembly test
+## ----------------------------
+##
+## Basics
+MAIN="systemd-management.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+## Profiling
+PROFOS="sles"
+PROFCONDITION="16.0"
+
+## stylesheet location
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2021-ns"
+
+DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
\ No newline at end of file
diff --git a/DC-SLES-systemd-setting-up-service b/DC-SLES-systemd-setting-up-service
new file mode 100644
index 000000000..c6146fb4e
--- /dev/null
+++ b/DC-SLES-systemd-setting-up-service
@@ -0,0 +1,12 @@
+MAIN="systemd-setting-up-service.asm.xml"
+#ROOTID="setting-up-systemd"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+## Profiling
+PROFOS="sles"
+PROFCONDITION="suse-product"
+#STRUCTID="STRUCTURE-ID"
+#PROFARCH="x86_64;zseries;power;aarch64"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse-ns"
diff --git a/DC-SLES-virtualization-disk-cache b/DC-SLES-virtualization-disk-cache
new file mode 100644
index 000000000..42d944dd4
--- /dev/null
+++ b/DC-SLES-virtualization-disk-cache
@@ -0,0 +1,10 @@
+MAIN="virtualization-disk-cache.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-io b/DC-SLES-virtualization-io
new file mode 100644
index 000000000..222a9c0a5
--- /dev/null
+++ b/DC-SLES-virtualization-io
@@ -0,0 +1,10 @@
+MAIN="virtualization-io.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-libvirt b/DC-SLES-virtualization-libvirt
new file mode 100644
index 000000000..5814ebc27
--- /dev/null
+++ b/DC-SLES-virtualization-libvirt
@@ -0,0 +1,10 @@
+MAIN="virtualization-libvirt.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-qemu b/DC-SLES-virtualization-qemu
new file mode 100644
index 000000000..b469cccb8
--- /dev/null
+++ b/DC-SLES-virtualization-qemu
@@ -0,0 +1,10 @@
+MAIN="virtualization-qemu.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-spice-removal b/DC-SLES-virtualization-spice-removal
new file mode 100644
index 000000000..0bd7bb003
--- /dev/null
+++ b/DC-SLES-virtualization-spice-removal
@@ -0,0 +1,10 @@
+MAIN="virtualization-spice-removal.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-support b/DC-SLES-virtualization-support
new file mode 100644
index 000000000..62d9d19c2
--- /dev/null
+++ b/DC-SLES-virtualization-support
@@ -0,0 +1,10 @@
+MAIN="virtualization-support.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/articles/Micro-upgrade.asm.xml b/articles/Micro-upgrade.asm.xml
index d0e967c31..af629ebab 100644
--- a/articles/Micro-upgrade.asm.xml
+++ b/articles/Micro-upgrade.asm.xml
@@ -50,6 +50,13 @@
Upgrading &productnameshort; from Previous Releases
+ 2026-01-29
+
+
+ Fixed supported versions
+
+
+ 2025-11-04
diff --git a/articles/agama-automated-installation.asm.xml b/articles/agama-automated-installation.asm.xml
index d2d559033..972117131 100644
--- a/articles/agama-automated-installation.asm.xml
+++ b/articles/agama-automated-installation.asm.xml
@@ -41,7 +41,7 @@
- Using Auto&yast; profiles with &agama;
+ Using &ay; profiles with &agama;Initiating automated installation using Agama
@@ -65,7 +65,7 @@
- Compatibility between Auto&yast; and &agama; profiles
+ Compatibility between &ay; and &agama; profiles
@@ -87,13 +87,13 @@
list most recent date/entry at the top -->
-
+ 2025-11-04
@@ -116,7 +116,7 @@
How to automate &productname; installations for unattended bulk deployments using &agama;, ensuring efficient and consistent server provisioning
Automatically install &productnameshort; using &agama;
-
+
@@ -164,7 +164,7 @@
EFFORT
- You may need 30 minutes to read and understand the most important sections of this
+ You need about 30 minutes to read and understand the most important sections of this
article. The time required for customization of Agama profiles and storage
configuration depends on deployment requirements.
@@ -185,7 +185,7 @@
- A bare metal server or a virtual machine. For server installations without any
+ A bare-metal server or a virtual machine. For server installations without any
desktop environment, &suse; recommends a minimum of 1 CPU, 2 GB
memory and 32 GB storage (which includes storage for Btrfs snapshots in
the root partition, swap space, and storage for software packages).
@@ -206,7 +206,7 @@
Optional registration
Certain images that are signed with the developer's key may allow you to skip
- registration before or while installation. Besides, certain images may
+ registration before or during installation. Besides, certain images may
contain all installable packages for your operating system that you can use
as an offline package repository. In such cases, you may not need an active
registration code before installation. However, if you use software packages
diff --git a/articles/agama-based-installation.asm.xml b/articles/agama-based-installation.asm.xml
index 604576a3d..9ba6aee13 100644
--- a/articles/agama-based-installation.asm.xml
+++ b/articles/agama-based-installation.asm.xml
@@ -60,7 +60,8 @@
- Installing &productname; Manually Using &agama;
+ Installing &productname; Manually Using &agama; On &power;&zseries;&x86-64;
diff --git a/articles/ansible.asm.xml b/articles/ansible.asm.xml
index 7ef133a93..34a0ea066 100644
--- a/articles/ansible.asm.xml
+++ b/articles/ansible.asm.xml
@@ -27,7 +27,7 @@
- Introduction to Ansible core
+ Introduction to Ansible Core2025-11-04
@@ -76,11 +76,11 @@
- Introduction to Ansible core
+ Introduction to Ansible Core
Learn how to simplify IT tasks and system management using Ansible core
- Use Ansible automation platform to efficiently automate IT tasks.
+ Use Ansible Automation platform to efficiently automate IT tasks
diff --git a/articles/comparison-sle16-sle15.asm.xml b/articles/comparison-sle16-sle15.asm.xml
index d0f97f00e..b45fc16a0 100644
--- a/articles/comparison-sle16-sle15.asm.xml
+++ b/articles/comparison-sle16-sle15.asm.xml
@@ -38,8 +38,8 @@
- Key Differences Between &sle; 15 and 16
- Adopting &sle; 16
+ Key Differences Between &sle; 15 and &suselinux; 16
+ Adopting &suselinux; 16
@@ -64,11 +64,13 @@
- &sle;
+ &suselinux;
- Adopting for &sle; 16
- Key differences between &slea; 15 and 16
- Key differences between &slea; 15 and 16
+ Adopting for &suselinux; 16
+ Key differences between &slea; 15 and
+ &suselinux; 16
+ Key differences between &slea; 15 and
+ &suselinux; 16
@@ -92,7 +94,7 @@
WHAT?
- This article describes the key differences between &slea; 15 and 16.
+ This article describes the key differences between &slea; 15 and &suselinux; 16.
@@ -100,7 +102,7 @@
WHY?
- This article helps you evaluate an upgrade to &slea; 16.
+ This article helps you evaluate an upgrade to &suselinux; 16.
@@ -116,7 +118,7 @@
GOAL
- Understand how to best adopt &slea; 16.
+ Understand how to best adopt &suselinux; 16.
diff --git a/articles/deployment-pxe-boot.asm.xml b/articles/deployment-pxe-boot.asm.xml
index 092edf1db..224722709 100644
--- a/articles/deployment-pxe-boot.asm.xml
+++ b/articles/deployment-pxe-boot.asm.xml
@@ -88,8 +88,9 @@
- Deploying &productname; Using Network PXE Boot
- on &zseries;
+ Deploying &productname; Using Network PXE Boot On
+ &zseries;&x86-64;
+
@@ -128,14 +129,15 @@
-->
&x86-64;
- &aarch64;
+ &zseries;&productname;
- Deploying &productname; Using Network PXE Boot
- Deploying &productname; using the PXE boot
+ Deploying &productname; Using Network PXE Boot On
+ &zseries;&x86-64;
+ Deploying &productname; using the PXE boot &zseries;&x86-64;
Deploying &productname; using the PXE boot
@@ -382,8 +384,10 @@
REQUIREMENTS
- A properly configured PXE boot server. For details, refer to: PXE
+ server installationPXE
server installation.
diff --git a/articles/deployment-raw-images-ibm-dasd.asm.xml b/articles/deployment-raw-images-ibm-dasd.asm.xml
index 025df4b59..ea7e6dbd2 100644
--- a/articles/deployment-raw-images-ibm-dasd.asm.xml
+++ b/articles/deployment-raw-images-ibm-dasd.asm.xml
@@ -290,7 +290,7 @@
- 2024-10-10
+ 2025-11-04
@@ -307,7 +307,7 @@
-
+
- &productname;
+ &productname;
Deploying &productname; on &zsystems; zFCP disks
Deployment of &productname; raw images on
@@ -348,7 +348,7 @@
https://bugzilla.suse.com/enter_bug.cgiDocumentation
- SUSE Linux Enterprise Micro 6.0
+ SUSE Linux Enterprise Micro 6.2jsindelarova@suse.com
diff --git a/articles/ha-fencing-configuring.asm.xml b/articles/ha-fencing-configuring.asm.xml
index 2b8d5961f..a2168019a 100644
--- a/articles/ha-fencing-configuring.asm.xml
+++ b/articles/ha-fencing-configuring.asm.xml
@@ -65,7 +65,7 @@
&productname;
Configuring Node Fencing in a &ha; Cluster
- How to add physical fencing devices to manage node fencing in a &sleha; cluster
+ How to add physical fencing devices to manage node fencing in a &ha; cluster
Add physical fencing devices to manage HA node fencing
Initial Configuration
@@ -94,7 +94,7 @@
WHAT?
- How to configure a &sleha; cluster to use a physical node fencing device.
+ Node fencing protects the cluster from data corruption by resetting failed nodes.
@@ -120,7 +120,9 @@
GOAL
- Protect the cluster from data corruption by fencing failed nodes.
+ Configure a &ha; cluster to use a physical node fencing device. Physical
+ fencing devices can only be added after the cluster is already installed
+ and running, not during the initial cluster setup.
@@ -143,10 +145,23 @@
- To use &sbd; as the node fencing mechanism instead of a physical device, see
-
- Configuring Disk-Based &sbd; in an Existing &ha; Cluster.
+ To use &sbd; as the node fencing mechanism instead of a physical device, see one of the
+ following articles:
+
+
+
+
+ Configuring Disk-Based &sbd; in an Existing &ha; Cluster
+
+
+
+
+
+ Configuring Diskless &sbd; in an Existing &ha; Cluster
+
+
+
diff --git a/articles/ha-qdevice-configuring.asm.xml b/articles/ha-qdevice-configuring.asm.xml
index e1a9e5cc0..227b4ce66 100644
--- a/articles/ha-qdevice-configuring.asm.xml
+++ b/articles/ha-qdevice-configuring.asm.xml
@@ -97,8 +97,9 @@
WHAT?
- How to use the &crmshell; to configure &qdevice; and &qnet; in a &ha; cluster that is
- already installed and running.
+ &qdevice; and the arbitrator &qnet; help the cluster make quorum calculations in a
+ split-brain scenario. This allows the cluster to sustain more node failures than
+ the standard quorum rules allow.
@@ -106,9 +107,8 @@
WHY?
- &qdevice; and the arbitrator &qnet; participate in quorum calculations in a
- split-brain scenario. This allows the cluster to sustain more node failures than
- the standard quorum rules allow.
+ We recommend using &qdevice; and &qnet; in clusters with an even number of nodes,
+ especially two-node clusters.
@@ -125,8 +125,9 @@
GOAL
- Help the cluster make quorum calculations more easily. This is recommended for
- clusters with an even number of nodes, especially two-node clusters.
+ &qdevice; and &qnet; can be configured during the initial cluster setup or later in
+ a running cluster. This article explains how to configure them in a &ha; cluster
+ that is already installed and running.
diff --git a/articles/ha-sbd-configuring.asm.xml b/articles/ha-sbd-configuring.asm.xml
index 2072d9064..6dd0a3fb4 100644
--- a/articles/ha-sbd-configuring.asm.xml
+++ b/articles/ha-sbd-configuring.asm.xml
@@ -45,6 +45,7 @@
GNU Free Documentation License
+
@@ -94,8 +95,8 @@
WHAT?
- How to use the &crmshell; to configure disk-based &sbd; in a &ha; cluster that is
- already installed and running.
+ &sbd; provides a node fencing mechanism without using an external power-off device.
+ Node fencing protects the cluster from data corruption by resetting failed nodes.
@@ -104,8 +105,7 @@
To be supported, all &sleha; clusters must have node fencing
- configured. &sbd; provides a node fencing mechanism without using an external
- power-off device.
+ configured.
@@ -122,7 +122,9 @@
GOAL
- Protect the cluster from data corruption by fencing failed nodes.
+ &sbd; can be configured during the initial cluster setup or later in a running
+ cluster. This article explains how to configure &sbd; in a &ha; cluster that is
+ already installed and running.
@@ -149,9 +151,14 @@
+
+ To configure diskless &sbd; instead, see
+
+ Configuring Diskless &sbd; in an Existing &ha; Cluster.
+
If the &sbd; service is already running, see
-
+
Changing the Configuration of &sbd;.
@@ -190,12 +197,11 @@
-
+
diff --git a/articles/identifying_os.asm.xml b/articles/identifying_os.asm.xml
index d5f6a1018..04719e0b6 100644
--- a/articles/identifying_os.asm.xml
+++ b/articles/identifying_os.asm.xml
@@ -72,7 +72,7 @@
- &sle; and &slm;
+ &suselinux;
Identifying Operating Systems
How to identify &suse; products using the
@@ -103,7 +103,7 @@
The article describes how you can identify &suse; products and changes coming with
- the last release of &sle;.
+ the last release of &suselinux;.
@@ -137,7 +137,7 @@
- Installed product of the &sle; family.
+ Installed product of the &suselinux; family.
diff --git a/articles/intro-sap-ansible-automation.asm.xml b/articles/intro-sap-ansible-automation.asm.xml
new file mode 100644
index 000000000..a857e4442
--- /dev/null
+++ b/articles/intro-sap-ansible-automation.asm.xml
@@ -0,0 +1,158 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Introduction to Ansible Automation for &sles4sap; 16
+
+
+ 2025-11-04
+
+ Initial version
+
+
+ 2026-01-29
+
+ Updated broken url link
+
+
+ 2026-03-03
+
+ Updated broken url links in more info secton
+
+
+
+
+
+
+
+ &sles4sap;
+
+
+
+ &x86-64;
+ &power;
+ &zseries;
+ &aarch64;
+
+
+ Introduction to Ansible Automation for &sles4sap;
+ Learn about a set of Ansible collections and playbooks designed specifically for &sles4sap; on &productname; 16
+ How to use Ansible Automation for SAP environments on &productname; 16 systems
+
+
+ Administration
+
+
+
+ Authentication
+ Security
+
+
+ Products & Solutions
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ amrita.sakthivel@suse.com
+
+ yes
+
+
+
+
+
+ WHAT?
+
+
+ Learn about a powerful set of Ansible collections and playbooks that are designed for &sap;.
+ This automation codifies best practices, allowing you to build and maintain your &sap; landscapes with speed, reliability and consistency.
+
+
+
+
+ WHY?
+
+
+ Ansible Automation is essential for &sap; environments for organizations to codify and standardize the complex, time-consuming, and high-risk administrative tasks associated with &sap; infrastructure and operations.
+
+
+
+
+ EFFORT
+
+
+ It takes you up to an hour to read through this article.
+
+
+
+
+ GOAL
+
+
+ To transform the management of &sap; environments from a complex, error-prone, and time-intensive manual process into a fast, consistent, and fully auditable Infrastructure as Code (IaC) workflow.
+
+
+
+
+ REQUIREMENTS
+
+
+
+
+ Understanding of Ansible core and how it works
+
+
+
+ Understanding the architecture of &sap; systems
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/articles/klp.asm.xml b/articles/klp.asm.xml
index 87fa8c278..50747c243 100644
--- a/articles/klp.asm.xml
+++ b/articles/klp.asm.xml
@@ -41,6 +41,13 @@
+ 2026-02-12
+
+
+ Fixed pattern name
+
+
+ 2025-11-04
@@ -108,7 +115,7 @@
WHAT?
- Understanding and using &klp; on &sles;.
+ Understanding and using &klp; on &productname;.
diff --git a/articles/packages_lifecycle.asm.xml b/articles/packages_lifecycle.asm.xml
index d64ad87ca..0d5241b57 100644
--- a/articles/packages_lifecycle.asm.xml
+++ b/articles/packages_lifecycle.asm.xml
@@ -18,17 +18,38 @@
-
+
-
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
@@ -42,13 +63,69 @@
- Lifecycles of &sle; Components
+ Lifecycles of &suselinux; Components
+ 2026-03-04
+
+
+
+ Added the node.js update strategies
+
+
+
+ 2026-01-15
+
+
+
+ Added the MariaDB and Ansible update strategies
+
+
+
+ 2026-01-09
+
+
+
+ Added the Perl, PHP, PostgreSQL update strategies
+
+
+
+ 2025-11-28
+
+
+
+ Added the Java update strategies
+
+
+
+ 2025-11-25
+
+
+
+ Added the Valkey and Python update strategies
+
+
+
+ 2025-11-14
+
+
+
+ Removed the FAQ section
+
+
+
+ 2025-11-13
+
+
+
+ Added the &cockpit; section
+
+
+ 2025-11-04
@@ -56,7 +133,7 @@
Initial version
-
+
@@ -69,10 +146,10 @@
- &sle;
+ &suselinux;
- Lifecycles of &sle; Components
- Learn how &sle; 16 categorizes packages into stable, balanced and agile lifecycles to manage updates and system stability
+ Lifecycles of &suselinux; Components
+ Learn how &suselinux; 16 categorizes packages into stable, balanced and agile lifecycles to manage updates and system stability
Understand different types of component lifecycles
@@ -102,7 +179,7 @@
WHAT?
- This article outlines how every &sle; 16 software package is assigned one of three specific update policies: stable, balanced or agile.
+ This article outlines how every &suselinux; 16 software package is assigned one of three specific update policies: stable, balanced or agile.
@@ -138,17 +215,21 @@
-
-
-
-
-
-
-
+
+
+
-
+
+
+
+
+
+
+
+
+
diff --git a/articles/sle16-upgrade.asm.xml b/articles/sle16-upgrade.asm.xml
index 1df98356b..a10b31091 100644
--- a/articles/sle16-upgrade.asm.xml
+++ b/articles/sle16-upgrade.asm.xml
@@ -75,6 +75,20 @@
list most recent date/entry at the top -->
+ 2025-11-17
+
+
+ Fixed default migration target (BSC#1253353)
+
+
+
+ 2025-11-14
+
+
+ Fixed installation time statement (BSC#1253495)
+
+
+ 2025-11-04
@@ -144,8 +158,8 @@
EFFORT
- 15 minutes of reading. The time of the upgrade itself depends on the size of the
- installation, the speed of the system and your Internet connection.
+ 15 minutes of reading. The time of the upgrade itself depends on the system's
+ performance, the installation size and your network connection speed.
diff --git a/articles/sles-pxe-server-setup.asm.xml b/articles/sles-pxe-server-setup.asm.xml
index b982e3701..536241ea8 100644
--- a/articles/sles-pxe-server-setup.asm.xml
+++ b/articles/sles-pxe-server-setup.asm.xml
@@ -45,6 +45,14 @@
Setting Up a PXE Boot Server
+
+ 2026-03-16
+
+
+ Modified procedures for extracting files for online and full ISO images based on processor architecture.
+
+
+ 2025-11-04
diff --git a/articles/supportconfig.asm.xml b/articles/supportconfig.asm.xml
index 30596b156..7b053bd37 100644
--- a/articles/supportconfig.asm.xml
+++ b/articles/supportconfig.asm.xml
@@ -52,6 +52,13 @@
Gathering System Information for Support
+ 2025-11-18
+
+
+ Added an installation command for &slm;
+
+
+ 2025-11-04
diff --git a/articles/systemd-basics.asm.xml b/articles/systemd-basics.asm.xml
index f10a8fc0c..6838689f1 100644
--- a/articles/systemd-basics.asm.xml
+++ b/articles/systemd-basics.asm.xml
@@ -113,7 +113,7 @@
&productname;
- &slm;
+ &productname;
@@ -121,7 +121,7 @@
&systemd; service management, dependency tracking, and more.
- Basics of &systemd;, including service management, dependency tracking, logging, resource management, socket activation and system control.
+ Basics of &systemd;, including service management, dependency tracking, logging, resource management, socket activation and system control
diff --git a/articles/systemd-management.asm.xml b/articles/systemd-management.asm.xml
index 81302811b..ebb7283fd 100644
--- a/articles/systemd-management.asm.xml
+++ b/articles/systemd-management.asm.xml
@@ -35,7 +35,9 @@
Troubleshooting systemd management
-
+
+ Converting SysV init to systemd
+
@@ -69,10 +71,10 @@
Managing &systemd; Services
- 2025-11-04
+ 2025-12-16
- Initial version
+ Added a procedure to convert Sys V init to systemd
@@ -80,7 +82,7 @@
-
+
+
diff --git a/articles/systemd-setting-up-service.asm.xml b/articles/systemd-setting-up-service.asm.xml
index 273a1f275..8d4ef1de0 100644
--- a/articles/systemd-setting-up-service.asm.xml
+++ b/articles/systemd-setting-up-service.asm.xml
@@ -31,6 +31,9 @@
Unit dependencies and order
+
+ Securing systemd services
+
@@ -78,7 +81,7 @@
-
+
&productname;
- &productname;
+ &productname;
diff --git a/articles/technical-diffrences-from-sle15.asm.xml b/articles/technical-diffrences-from-sle15.asm.xml
index b5a403e52..385a8cf86 100644
--- a/articles/technical-diffrences-from-sle15.asm.xml
+++ b/articles/technical-diffrences-from-sle15.asm.xml
@@ -45,13 +45,38 @@
- Key Technical Differences Between &slea; 15 and &slea; 16
+ Key Technical Differences Between &slea; 15 and &suselinux; 16
+ 2026-02-12
+
+
+
+ Fixed wording.
+
+
+
+ 2025-11-21
+
+
+
+ Fixed x86-64-v3 package name (BSC#1253331)
+
+
+
+ 2025-11-18
+
+
+
+ Removed a sentence about &selnx; on &sles4sap;
+
+
+ 2025-11-04
@@ -70,13 +95,13 @@
- &sle;
+ &suselinux;
- Key Technical Differences Between &slea; 15 and &slea; 16
+ Key Technical Differences Between &slea; 15 and &suselinux; 16
Technical information about the significant changes
from &productnameshort;15 to 16
- Learn about the key technical differences between &slea; 15 and &slea; 16, including what has been
+ Learn about the key technical differences between &slea; 15 and &suselinux; 16, including what has been
added or removed in the new version
@@ -103,7 +128,7 @@
This article addresses common technical questions about installing and using
- &slea; 16.
+ &suselinux; 16.
@@ -111,7 +136,7 @@
WHY?
- &slea; 16 introduces significant changes compared to its predecessor, &slea; 15.
+ &suselinux; 16 introduces significant changes compared to its predecessor, &slea; 15.
This article helps you navigate those differences.
@@ -128,7 +153,7 @@
GOAL
- You will have a clear understanding of what has been added or removed in &slea; 16.
+ You will have a clear understanding of what has been added or removed in &suselinux; 16.
diff --git a/articles/ulp-livepatching.asm.xml b/articles/ulp-livepatching.asm.xml
index b0de57758..713030654 100644
--- a/articles/ulp-livepatching.asm.xml
+++ b/articles/ulp-livepatching.asm.xml
@@ -101,7 +101,7 @@
&ulpa; patches libraries without system downtime
- &ulpa; allows to fix (security) bugs in libraries on-the-fly without system downtime.
+ &ulpa; allows to fix (security) bugs in libraries on-the-fly without system downtime
diff --git a/articles/virtual-disk-cache-mode-configure.asm.xml b/articles/virtualization-disk-cache.asm.xml
similarity index 96%
rename from articles/virtual-disk-cache-mode-configure.asm.xml
rename to articles/virtualization-disk-cache.asm.xml
index feb55fc44..f1baf1be4 100644
--- a/articles/virtual-disk-cache-mode-configure.asm.xml
+++ b/articles/virtualization-disk-cache.asm.xml
@@ -34,7 +34,7 @@
-
+
Smart Docs
@@ -50,7 +50,7 @@
https://bugzilla.suse.com/enter_bug.cgiDocumentationSUSE Linux Enterprise Server 16.0
- tbazant@suse.com
+ souvik.sarkar@suse.comyes
@@ -63,7 +63,7 @@
- &sles;
+ &productname;
diff --git a/articles/virtualization-io.asm.xml b/articles/virtualization-io.asm.xml
new file mode 100644
index 000000000..80c1bb81b
--- /dev/null
+++ b/articles/virtualization-io.asm.xml
@@ -0,0 +1,114 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ I/O Virtualization for Virtual Machines
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ I/O Virtualization Concepts: VFIO, SR-IOV and PV
+ Learn about the basic concepts of virtualization IO.
+
+
+ I/O virtualization enables virtual machine
+ guests to efficiently access physical hardware using paravirtualization, VFIO and
+ SR-IOV techniques
+
+
+
+
+ WHAT?
+
+
+ I/O virtualization is a hypervisor-based mechanism that allows a &vmguest; to interact
+ with physical hardware devices such as network interfaces and storage controllers,
+ balancing resource efficiency with performance across emulation, direct assignment
+ and hardware-assisted methods.
+
+
+
+
+ WHY?
+
+
+ Use I/O virtualization to reduce latency and CPU overhead for
+ performance-sensitive workloads by avoiding full device emulation.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-libvirt.asm.xml b/articles/virtualization-libvirt.asm.xml
new file mode 100644
index 000000000..d2bf89864
--- /dev/null
+++ b/articles/virtualization-libvirt.asm.xml
@@ -0,0 +1,148 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Managing Virtualization Platforms with &libvirt;
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Managing &kvm; and &qemu; Virtualization with &libvirt;
+ Manage virtualization platforms (&kvm;, &qemu;) with a unified API.
+
+
+ Configure the &libvirt; software toolkit to manage &kvm; or &qemu; virtual machines through a consistent API while optimizing storage pools and network bridges
+
+
+
+
+ WHAT?
+
+
+ &libvirt; is a software toolkit that provides a consistent API for managing virtual machines across multiple hypervisor platforms such as &kvm; and &qemu;.
+
+
+
+
+ WHY?
+
+
+ Use &libvirt; to eliminate the operational complexity of managing vendor-specific
+ tools and to enable a unified management interface for diverse virtualization
+ environments.
+
+
+
+
+ EFFORT
+
+
+ It takes approximately 20 minutes to read this article and requires basic knowledge of Linux command-line operations and virtualization concepts.
+
+
+
+
+ GOAL
+
+
+ By the end of this guide, you will have a configured &libvirt; environment with functional storage pools and network bridges ready for VM guest deployment.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-qemu.asm.xml b/articles/virtualization-qemu.asm.xml
new file mode 100644
index 000000000..a1d2afc6f
--- /dev/null
+++ b/articles/virtualization-qemu.asm.xml
@@ -0,0 +1,142 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Managing &kvm; Virtualization on &productname;
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ &kvm; and &qemu; Virtualization on &productname; &productnumber;
+ Setting up &kvm; and &qemu; Virtualization on &productname;
+
+
+ Configure &kvm; and &qemu; virtualization on &productname;
+ to optimize guest performance using advanced storage, networking and direct hardware
+ access
+
+
+
+
+ WHAT?
+
+
+ &kvm; (Kernel Virtual Machine) is a virtualization solution that transforms the Linux
+ kernel into a hypervisor for running multiple isolated virtual environments.
+
+
+
+
+ WHY?
+
+
+ Use &kvm; virtualization to consolidate server workloads and significantly reduce hardware resource usage.
+
+
+
+
+ EFFORT
+
+
+ It takes less than 15 minutes of reading time to understand the concept of
+ virtualization.
+
+
+
+
+ GOAL
+
+
+ By the end of this guide, you will have a configured &kvm; host and be able to deploy
+ virtual machine guests with optimized storage, networking and direct hardware
+ access.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-spice-removal.asm.xml b/articles/virtualization-spice-removal.asm.xml
new file mode 100644
index 000000000..10afe044f
--- /dev/null
+++ b/articles/virtualization-spice-removal.asm.xml
@@ -0,0 +1,125 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Migrating Spice Virtual Machines to VNC on &productname;
+ Migrate Spice virtual machines to VNC on &productname;
+
+ Learn how to migrate Spice-based virtual machines
+ to VNC on &productname; &productnumber; using libvirt by updating VM XML configurations after Spice
+ removal
+
+
+
+
+ WHAT?
+
+
+ Migrating Spice-based virtual machines to VNC is a configuration process that replaces the deprecated Spice graphical console protocol with VNC on &productname; &productnumber; using libvirt and &kvm;.
+
+
+
+
+ WHY?
+
+
+ This migration ensures continued VM console access on &productname; &productnumber; by replacing the removed Spice protocol with VNC, which is widely supported and actively maintained.
+
+
+
+
+ EFFORT
+
+
+ It takes approximately 20 minutes to complete this procedure and requires basic knowledge of libvirt, virsh commands and XML configuration files.
+
+
+
+
+ GOAL
+
+
+ By the end of this guide, your existing virtual machine will be reconfigured to use VNC instead of Spice and will start successfully on a &productname; &productnumber; host.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-support.asm.xml b/articles/virtualization-support.asm.xml
new file mode 100644
index 000000000..755d372e8
--- /dev/null
+++ b/articles/virtualization-support.asm.xml
@@ -0,0 +1,124 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization Support in &productname;
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ &kvm; Virtualization Support in &productname;
+ &kvm; virtualization support and limits in &productname;
+
+
+ Supported &kvm; virtualization architectures, limits, features and guest operating systems in &productname;
+
+
+
+
+ WHAT?
+
+
+ Virtualization support in &suselinux; defines the supported host and guest configurations, architectures, limits and features for &kvm;-based virtualization.
+
+
+
+
+ WHY?
+
+
+ Use virtualization in &productname; to consolidate hardware, improve resource utilization, reduce operational costs, and support enterprise cloud and container-based workloads.
+
+
+
+
+ EFFORT
+
+
+ It takes approximately 10 minutes to read this article and assumes basic knowledge of Linux system administration and virtualization concepts.
+
+
+
+
+ GOAL
+
+
+ By the end of this article, you will understand the supported virtualization architectures, limits, and guest operating systems in &productname;.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization.asm.xml b/articles/virtualization.asm.xml
index 07247b083..9dd7fa9f4 100644
--- a/articles/virtualization.asm.xml
+++ b/articles/virtualization.asm.xml
@@ -15,9 +15,10 @@
-
-
+
+
+
+ Legal Notice
@@ -29,8 +30,15 @@
- Virtualization
+ Virtualization on SUSE Linux with Benefits and Setup
+ 2026-01-21
+
+
+ &productnameshort; 15 SP7 added to the list of supported guest operating system
+
+
+ 2025-11-04
@@ -46,7 +54,7 @@
Products & Solutions
-
+
Virtualization
@@ -76,10 +84,10 @@
Introduction to virtualization
- Learn about the basic concepts of virtualization.
+ Learn about the basic concepts of virtualization
- By means of virtualization, you can run multiple virtual machines on a single bare-metal host to save resources.
+ Using virtualization, you can run multiple virtual machines on a single bare-metal host to save resources
@@ -87,8 +95,7 @@
WHAT?
- By means of virtualization, you can run multiple virtual machines on a single
- bare-metal host.
+ Virtualization is a technology that allows a single physical server (host) to run multiple virtual machines (guests), each with its own operating system.
@@ -96,8 +103,7 @@
WHY?
- Sharing host hardware between multiple virtualized guests significantly saves
- resources.
+ Use virtualization to reduce hardware costs, save power and space, and improve infrastructure flexibility and productivity.
@@ -105,8 +111,15 @@
EFFORT
- It takes less than 15 minutes of your time to understand the concept of
- virtualization.
+ It takes less than 15 minutes to understand the core concepts of virtualization.
+
+
+
+
+ GOAL
+
+
+ By the end of this article, you will understand the benefits of virtualization and the basic setup of a virtual machine host and guest environment.
@@ -116,11 +129,12 @@
-
+
+
diff --git a/common/generic-entities.ent b/common/generic-entities.ent
index 58af04b68..4e108ac7a 100644
--- a/common/generic-entities.ent
+++ b/common/generic-entities.ent
@@ -464,6 +464,7 @@ use &deng;! -->
+
diff --git a/common/product-entities.ent b/common/product-entities.ent
index 89c410822..d21c87591 100644
--- a/common/product-entities.ent
+++ b/common/product-entities.ent
@@ -59,6 +59,7 @@
+
diff --git a/concepts/about-install-sap-pattern-ansible.xml b/concepts/about-install-sap-pattern-ansible.xml
new file mode 100644
index 000000000..863959ab3
--- /dev/null
+++ b/concepts/about-install-sap-pattern-ansible.xml
@@ -0,0 +1,53 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ About the sles_sap_automation pattern
+
+
+
+
+
+ Using the &suse; package pattern is recommended because it is the simplest and most reliable method for installing all the required SAP automation components.
+
+
+
+ By using the pattern, you ensure all required Ansible collections and the playbook packages are installed together, preventing dependency problems.
+ For &sles4sap; 16, install the sles_sap_automation package pattern:
+ &prompt.sudo;zypper install --type pattern sles_sap_automation
+This command installs the following packages:
+
+ansible-sap-infrastructure
+ansible-sap-install
+ansible-sap-operations
+ansible-sap-playbooks
+
+Each package contains an Ansible Collection with a curated list of supported Ansible roles.
+ To call each role in a collection, you must use its fully qualified collection name (FQCN). This tells Ansible exactly where to find the role and prevents any naming conflicts with other collections.
+For example:
+
+ ---
+ - name: Ansible Play to install SAP Product
+ hosts: all
+ tasks:
+ - name: Install SAP Product
+ ansible.builtin.include_role:
+ name: suse.sap_install.sap_swpm
+
+
\ No newline at end of file
diff --git a/concepts/ansible-collections.xml b/concepts/ansible-collections.xml
new file mode 100644
index 000000000..06651b928
--- /dev/null
+++ b/concepts/ansible-collections.xml
@@ -0,0 +1,146 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Understanding the sles_sap_automation package pattern
+
+
+
+
+
+ The sles_sap_automation package pattern installs four packages: ansible-sap-install, ansible-sap-infrastructure,
+ ansible-sap-operations and ansible-sap-playbooks. Each package contains an Ansible Collection with a curated list of supported Ansible roles.
+
+
+
+
+The sap_install collection
+The sap_install collection installs, configures and manages &sap; software on Linux operating systems.
+
+This collection covers the following tasks:
+Ensure the operating system is configured, and all required SAP installation media are staged prior to system setup.
+ Install the &hana; database.
+ Install &sap; products such as &s4h;, &netweaver;, etc.
+ Enable &hana; system replication.
+ Configure a &ha; cluster for &hana; and ASCS/ERS.
+
+ The key roles in this collection include:
+
+ sap_general_preconfigure: installs required packages and performs basic OS configuration steps according to applicable SAP notes for installing and running &hana;
+ or &sap; ABAP Application Platform (formerly known as SAP NetWeaver).
+sap_hana_preconfigure: installs additional required packages and performs additional OS configuration steps according to applicable SAP notes for installing and running &hana; after the role sap_general_preconfigure.
+sap_netweaver_preconfigure: installs additional required packages and performs additional OS configuration steps according to applicable SAP notes for installing and running &sap; ABAP Application Platform after the role sap_general_preconfigure.
+sap_storage_setup: prepares a host with the storage requirements of a &sap; system (prior to software installation).
+sap_install_media_detect: detects and extracts &sap; installation media.
+sap_hana_install: installs &hana; using the &hana; database lifecycle manager (HDBLCM).
+sap_ha_install_hana_hsr: configures and enables &hana; system replication between two nodes.
+sap_swpm: installs various &sap; systems installable by SAP Software Provisioning Manager (SWPM).
+sap_ha_pacemaker_cluster: installs and configures Linux Pacemaker High Availability clusters for &hana; and &netweaver; systems on various infrastructure platforms.
+
+ List of all roles:
+&prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_install/roles ls
+ sap_anydb_install_oracle sap_ha_install_anydb_ibmdb2 sap_ha_pacemaker_cluster sap_hana_preconfigure sap_maintain_etc_hosts sap_storage_setup
+ sap_general_preconfigure sap_ha_install_hana_hsr sap_hana_install sap_install_media_detect sap_netweaver_preconfigure sap_swpm
+
+For more details about the roles and role variables in the sap_install collection, refer to the README.md, for example:
+&prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_install/roles/sap_swpm ls
+ README.md defaults meta tasks templates
+
+
+ The sap_infrastructure collection
+ The sap_infrastructure collection provides a set of Ansible roles to automate various infrastructure-related tasks for &sap; systems.
+ The core focus is on creating and configuring the resources across diverse platforms, including cloud hyperscalers and hypervisors.
+ This collection covers the following tasks:
+
+ Provision virtual machines on target infrastructure platforms using Ansible. This includes the provisioning of High Availability resources such as routing and load balancing, where applicable.
+
+ Assign temporary virtual IP addresses (VIPs) for application installation before the cluster takes over management.
+
+ The key roles in this collection are:
+
+ sap_vm_provision: provisions virtual machines to host &sap; software.
+
+ sap_vm_temp_vip: assigns a temporary virtual IP (VIP) to the OS network interface; it is replaced by the cluster VIP resource once the cluster is configured.
+
+
+ The following infrastructure platforms are supported:
+
+ AWS EC2 virtual server instance
+ Google Cloud Compute Engine virtual machine
+ IBM Cloud, Intel virtual server
+ IBM Cloud, Power virtual server
+ Microsoft Azure virtual machine
+ IBM PowerVM virtual machine
+
+ List of all roles:
+ &prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_infrastructure/roles ls
+ sap_vm_provision sap_vm_temp_vip
+For more details about the roles and role variables in the sap_infrastructure collection, refer to the README.md, for example:
+&prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_infrastructure/roles/sap_vm_provision ls
+ PLATFORM_GUIDANCE.md README.md defaults meta tasks
+Since this collection leverages native cloud automation tools, ensure you meet your target platform's specific prerequisites as detailed in the
+ Infrastructure platform guidance.
+ Requirements are not part of the pattern and are not supported.
+
+
+ The sap_operations collection
+ The sap_operations collection executes various day-to-day operation tasks for &sap; systems.
+ This collection covers the following tasks:
+
+ Start, stop and restart of the &hana; database server.
+ Start, stop and restart of the &netweaver; application server.
+ Operate the &sap; system using sapcontrol and various functions.
+
+ Update &sap; profiles.
+
+ List of all roles:
+&prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_operations/roles ls
+ sap_control sap_profile_update
+For more details about the roles and role variables in the sap_operations collection, refer to the README.md, for example:
+&prompt.user; /usr/share/ansible/collections/ansible_collections/suse/sap_operations/roles/sap_control ls
+ README.md defaults tasks
+
+ About ansible.playbooks_for_sap
+
+ The ansible.playbooks_for_sap collection contains Ansible playbooks for deploying various &sap; software solution scenarios onto different cloud service providers.
+
+To make the playbook easy to use and scalable, the following key principles are used in the design:
+
+ Modular configuration: The playbooks use modular configuration, which is a variable system that lets you focus only on the specific values for your environment, eliminating the need to sift through hundreds of unused settings.
+ Brownfield and greenfield support: Flexible by design, the playbooks support both greenfield deployments (creating a landscape from scratch) and brownfield operations (running against existing servers).
+ Interactive mode: For quick setups or for users who prefer not to manage variable files, an interactive mode is available. This mode will prompt you for all the necessary information, guiding you step-by-step.
+ (Optional) Automated Media Downloads: If the community.sap_launchpad Ansible Collection is installed, the playbooks can automatically download the required &sap; installation media for you, saving significant manual effort.
+ This feature requires a valid SAP S-User with the necessary download privileges.
+
+The benefits of Ansible playbooks for &sap; are:
+
+The playbooks are designed for simplicity, making them accessible to users across all levels of Ansible experience.
+Easily reconfigurable and extendable, they let you precisely tailor deployments to your needs. For instance, you can customize them for specific SAP versions, integrate monitoring tools, or adapt them to different network topologies.
+Each playbook contains a complete deployment scenario, which ensures consistency and simplifies the process.
+Deploy the same &sap; solution across diverse infrastructure including different cloud service providers.
+
+For more details about the ansible.playbooks_for_sap collection, refer to the README.md:
+&prompt.user; /usr/share/ansible/playbooks/ansible.playbooks_for_sap ls
+ LICENSE README.md deploy_scenarios docs special_actions
+ and
+&prompt.user; /usr/share/ansible/playbooks/ansible.playbooks_for_sap/docs ls
+ CONTRIBUTING.md CONTRIBUTORS.md DEV_EXECUTION_FLOW.md FAQ.md GET_STARTED_AZURE_DEVOPS.md GET_STARTED_MACOS.md GET_STARTED_WINDOWS.md README.md images sample
+
+
+
\ No newline at end of file
diff --git a/concepts/ansible-role-aide.xml b/concepts/ansible-role-aide.xml
index 1190fd9d6..af294f6ae 100644
--- a/concepts/ansible-role-aide.xml
+++ b/concepts/ansible-role-aide.xml
@@ -39,7 +39,6 @@
- name: Example aide role invocation
hosts: managed_hosts
become: true
- gather_facts: true
tasks:
- name: Include role aide
diff --git a/concepts/ansible-role-certificate.xml b/concepts/ansible-role-certificate.xml
index 652c7da84..6371f01c6 100644
--- a/concepts/ansible-role-certificate.xml
+++ b/concepts/ansible-role-certificate.xml
@@ -38,14 +38,13 @@
hosts: managed_nodes
become: true
- vars:
- certificate_requests:
- - name: mycert
- dns: *.example.com
- ca: self-sign
-
- tasks:
- - name: Issue a self signed certificate
+ tasks:
+ - name: Issue a self signed certificate
+ vars:
+ certificate_requests:
+ - name: mycert
+ dns: *.example.com
+ ca: self-sign
ansible.builtin.include_role:
name: suse.linux_system_roles.certificate
@@ -69,16 +68,14 @@ or string and is optional.
-name: Issue a certificate and specify location
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- certificate_requests:
- - name: test/path/mycert
- dns: *.example.com
- ca: self-sign
- tasks:
- - name: Issue a certificate and specify location
+ tasks:
+ - name: Issue a certificate and specify location
+ vars:
+ certificate_requests:
+ - name: test/path/mycert
+ dns: *.example.com
+ ca: self-sign
ansible.builtin.include_role:
name: suse.linux_system_roles.certificate
diff --git a/concepts/ansible-role-cockpit.xml b/concepts/ansible-role-cockpit.xml
index 9665d04f2..e69ffd331 100644
--- a/concepts/ansible-role-cockpit.xml
+++ b/concepts/ansible-role-cockpit.xml
@@ -36,7 +36,6 @@
- name: install Cockpit on all hosts
hosts: managed_nodes
become : true
- gather_facts: true
tasks:
- name: Dynamically execute the Cockpit role
@@ -50,15 +49,13 @@
- name: Install Cockpit and automatically open firewall port
hosts: managed_hosts
become: true
- gather_facts: true
-
- vars:
- cockpit_enabled: true
- cockpit_started: true
- cockpit_manage_firewall: true
tasks:
- name: Dynamically install Cockpit and open firewall
+ vars:
+ cockpit_enabled: true
+ cockpit_started: true
+ cockpit_manage_firewall: true
ansible.builtin.include_role:
name: suse.linux_system_roles.cockpit
diff --git a/concepts/ansible-role-crypto-policies.xml b/concepts/ansible-role-crypto-policies.xml
index 2212f7992..0fd244f8a 100644
--- a/concepts/ansible-role-crypto-policies.xml
+++ b/concepts/ansible-role-crypto-policies.xml
@@ -40,15 +40,13 @@
- name: Enforce the 'FUTURE' system-wide cryptographic policy
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- crypto_policies_policy: "FUTURE"
- crypto_policies_reload: true
- crypto_policies_reboot_ok: false
tasks:
- name: Enforce the FUTURE policy
+ vars:
+ crypto_policies_policy: "FUTURE"
+ crypto_policies_reload: true
+ crypto_policies_reboot_ok: false
ansible.builtin.include_role:
name: suse.linux_system_roles.crypto_policies
@@ -67,14 +65,12 @@
- name: Manage crypto policies
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- crypto_policies_policy: "DEFAULT:NO-SHA1"
- crypto_policies_reload: false
tasks:
- name: Configure default crypto policy level without SHA1
+ vars:
+ crypto_policies_policy: "DEFAULT:NO-SHA1"
+ crypto_policies_reload: false
ansible.builtin.include_role:
name: suse.linux_system_roles.crypto_policies
diff --git a/concepts/ansible-role-firewalld.xml b/concepts/ansible-role-firewalld.xml
index 6c4acd928..810d3c4d2 100644
--- a/concepts/ansible-role-firewalld.xml
+++ b/concepts/ansible-role-firewalld.xml
@@ -40,16 +40,14 @@
- name: Erase existing config and enable ssh service
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- firewall:
- - previous: replaced
- - service: ssh
- state: enabled
tasks:
- name: reset firewall and enable SSH service
+ vars:
+ firewall:
+ - previous: replaced
+ - service: ssh
+ state: enabled
ansible.builtin.include_role:
name: suse.linux_system_roles.firewall
@@ -72,14 +70,13 @@
- name: Disable TFTP service on managed nodes
hosts: managed_nodes
become: true
- gather_facts: true
- vars:
- firewall:
- - service: tftp
- state: disabled
tasks:
- name: Configure firewall to disable TFTP service
+ vars:
+ firewall:
+ - service: tftp
+ state: disabled
ansible.builtin.include_role:
name: suse.linux_system_roles.firewall
diff --git a/concepts/ansible-role-ha-cluster.xml b/concepts/ansible-role-ha-cluster.xml
index f38ed916e..15f59c2c0 100644
--- a/concepts/ansible-role-ha-cluster.xml
+++ b/concepts/ansible-role-ha-cluster.xml
@@ -35,62 +35,27 @@
Enable and manage cluster services across the defined cluster nodes.Ensure consistent HA setup across multiple environments, adhering to best practices and minimizing manual errors.
-Configure firewall and SELinux for managed nodes
+Configure firewall for managed nodes
---
- - name: Configure firewall and selinux for managed nodes
+ - name: Configure firewall for managed nodes
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- ha_cluster_manage_firewall: true
- ha_cluster_manage_selinux: true
tasks:
- - name: Manage firewall and selinux
+ - name: Manage firewall
+ vars:
+ ha_cluster_manage_firewall: true
ansible.builtin.include_role:
name: suse.linux_system_roles.ha_cluster
ha_cluster_manage_firewall: true: It controls whether the role should automatically configure the system's firewall to permit necessary cluster communication.
-ha_cluster_manage_firewall is a Boolean datatype and accepts values true,false,yes,no, 0 and 1.
+ha_cluster_manage_firewall is a Boolean datatype and accepts values true and false.
When set to true, it automatically adds firewall rules to open the necessary ports for the cluster to function. Default is false.
-
-ha_cluster_manage_selinux: true: Controls whether the HA cluster role will automatically configure the SELinux policy to allow cluster processes to function.
- ha_cluster_manage_selinux is a Boolean datatype and accepts values true,false,yes,no, 0 and 1.
- When set to true, it manages &selnx; and performs actions like setting the correct &selnx; contexts on necessary files and directories, enabling or setting &selnx; booleans required for cluster services etc. Default is false.
-Create pcsd TLS certificates and key files
-
- ---
- - name: Manage HA cluster with TLS certificates and key files
- hosts: managed_nodes
- become: true
- gather_facts: true
-
- vars:
- ha_cluster_pcsd_certificates:
- - name: FILENAME
- common_name: "{{ ansible_hostname }}"
- ca: self-sign
-
- tasks:
- - name: Manage firewall and selinux
- ansible.builtin.include_role:
- name: suse.linux_system_roles.ha_cluster
-
-This example creates self-signed pcsd certificate and private key files in /var/lib/pcsd
- with the file name FILENAME.crt and FILENAME.key.
-
- ha_cluster_pcsd_certificates: is a list of dictionaries used to define TLS certificates for the pcsd service.
- The default is []. It uses the structure of the underlying certificate system role.
-The certificate specific keys are ca, common_name and cert_basename.
-These keys are all string data types.
-
- For more details about all the variables in the HA cluster Linux system role,
refer to the specific README.md file on the control node:/usr/share/ansible/collections/ansible_collections/suse/linux_system_roles/docs
diff --git a/concepts/ansible-role-journald.xml b/concepts/ansible-role-journald.xml
index 028561df6..a81c9500f 100644
--- a/concepts/ansible-role-journald.xml
+++ b/concepts/ansible-role-journald.xml
@@ -39,16 +39,14 @@
- name: Configure persistent storage, limit disk usage, enable user logging and set synchronization frequency
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- journald_persistent: true
- journald_max_disk_size: 1024
- journald_per_user: true
- journald_sync_interval: 1
tasks:
- name: Configure persistent storage, limit disk usage, enable user logging and set synchronization frequency
+ vars:
+ journald_persistent: true
+ journald_max_disk_size: 1024
+ journald_per_user: true
+ journald_sync_interval: 1
ansible.builtin.include_role:
name: suse.linux_system_roles.journald
@@ -65,7 +63,7 @@
Default is false.
journald_sync_interval: 1: limits the total disk space the journal files may use up to 1 Gigabyte (1024 MB).
- journald_max_disk_size is an integer datatype and accepts an integer representing a time span in minutes.
+ journald_sync_interval is an integer datatype and accepts an integer representing a time span in minutes.
By default role does not alter currently used value.
@@ -76,14 +74,12 @@
- name: Configure systemd-journald for persistent storage and disk limits
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- journald_persistent: true
- journald_max_disk_size: 1024
tasks:
- name: Configure journald for persistent storage and disk limits
+ vars:
+ journald_persistent: true
+ journald_max_disk_size: 1024
ansible.builtin.include_role:
name: suse.linux_system_roles.journald
diff --git a/concepts/ansible-role-mssql.xml b/concepts/ansible-role-mssql.xml
index af2879e1a..08e15d725 100644
--- a/concepts/ansible-role-mssql.xml
+++ b/concepts/ansible-role-mssql.xml
@@ -39,15 +39,13 @@
- name: Install and configure basic Microsoft SQL Server
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- mssql_accept_microsoft_sql_server_standard_eula: true
- mssql_password: "{{ sa_secret_password }}"
- mssql_edition: "Developer"
tasks:
- name: Manage basic Microsoft SQL Server
+ vars:
+ mssql_accept_microsoft_sql_server_standard_eula: true
+ mssql_password: "{{ sa_secret_password }}"
+ mssql_edition: "Developer"
ansible.builtin.include_role:
name: suse.linux_system_roles.mssql
diff --git a/concepts/ansible-role-podman.xml b/concepts/ansible-role-podman.xml
index 3a76226d2..9224fdf0c 100644
--- a/concepts/ansible-role-podman.xml
+++ b/concepts/ansible-role-podman.xml
@@ -38,36 +38,34 @@
- name: Manage podman root containers and services
hosts: managed_hosts
become: true
- gather_facts: true
-
- vars:
- podman_firewall:
- - port: 8080/tcp
- state: enabled
- podman_kube_specs:
- - state: started
- kube_file_content:
- apiVersion: v1
- kind: Pod
- metadata:
- name: test1-httpd
- spec:
- containers:
- - name: test1-httpd
- image: registry.access.suse.com/test1/httpd-24
- ports:
- - containerPort: 8080
- hostPort: 8080
- volumeMounts:
- - mountPath: /var/www/html:Z
- name: test1-html
- volumes:
- - name: test1-html
- persistentVolumeClaim:
- claimName: test1-html-volume
tasks:
- name: Dynamically install Cockpit and open firewall
+ vars:
+ podman_firewall:
+ - port: 8080/tcp
+ state: enabled
+ podman_kube_specs:
+ - state: started
+ kube_file_content:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: test1-httpd
+ spec:
+ containers:
+ - name: test1-httpd
+ image: registry.access.suse.com/test1/httpd-24
+ ports:
+ - containerPort: 8080
+ hostPort: 8080
+ volumeMounts:
+ - mountPath: /var/www/html:Z
+ name: test1-html
+ volumes:
+ - name: test1-html
+ persistentVolumeClaim:
+ claimName: test1-html-volume
ansible.builtin.include_role:
name: suse.linux_system_roles.podman
diff --git a/concepts/ansible-role-selinux.xml b/concepts/ansible-role-selinux.xml
index 9d2c7c2f4..7b110ec5a 100644
--- a/concepts/ansible-role-selinux.xml
+++ b/concepts/ansible-role-selinux.xml
@@ -38,15 +38,13 @@
- name: Reset the selinux context
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- selinux_restore_dirs:
- - /var/www/
- - /etc/
tasks:
- name: reset the selinux context
+ vars:
+ selinux_restore_dirs:
+ - /var/www/
+ - /etc/
ansible.builtin.include_role:
name: suse.linux_system_roles.selinux
@@ -61,19 +59,17 @@
- name: set a selinux port label
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- selinux_ports:
- - ports: 8080
- proto: tcp
- setype: http_port_t
- state: present
- tasks:
- - name: set a selinux port label
- ansible.builtin.include_role:
- name: suse.linux_system_roles.selinux
+ tasks:
+ - name: set a selinux port label
+ vars:
+ selinux_ports:
+ - ports: 8080
+ proto: tcp
+ setype: http_port_t
+ state: present
+ ansible.builtin.include_role:
+ name: suse.linux_system_roles.selinux
selinux_ports: manages the &selnx; port labeling policy on a managed node.
diff --git a/concepts/ansible-role-ssh.xml b/concepts/ansible-role-ssh.xml
index 2f74b8180..9df3d2810 100644
--- a/concepts/ansible-role-ssh.xml
+++ b/concepts/ansible-role-ssh.xml
@@ -37,26 +37,24 @@
- name: Manage ssh clients
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- ssh_user: root
- ssh:
- Compression: true
- # wokeignore:rule=master
- ControlMaster: auto
- ControlPath: ~/.ssh/.cm%C
- Match:
- - Condition: "final all"
- GSSAPIAuthentication: true
- Host:
- - Condition: example
- Hostname: example.com
- User: user1
- ssh_ForwardX11: false
tasks:
- name: manage SSH clients
+ vars:
+ ssh_user: root
+ ssh:
+ Compression: true
+ # wokeignore:rule=master
+ ControlMaster: auto
+ ControlPath: ~/.ssh/.cm%C
+ Match:
+ - Condition: "final all"
+ GSSAPIAuthentication: true
+ Host:
+ - Condition: example
+ Hostname: example.com
+ User: user1
+ ssh_ForwardX11: false
ansible.builtin.include_role:
name: suse.linux_system_roles.ssh
diff --git a/concepts/ansible-role-suseconnect.xml b/concepts/ansible-role-suseconnect.xml
index 53c2ac327..48b2befa5 100644
--- a/concepts/ansible-role-suseconnect.xml
+++ b/concepts/ansible-role-suseconnect.xml
@@ -28,7 +28,7 @@
You can use this role to:Register a SUSE system to the SCC or servers.
- Activate or remove of specific add-on products or modules.
+ Activate or remove specific add-on products or modules.Deregister systems or products.Support transactional update register SLE-MICRO.Recheck tasks to ensure a smooth registration process.
@@ -40,20 +40,18 @@
- name: Register with SCC and activate modules
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- suseconnect_base_product:
- key: '{{ sles_registration_key }}'
- product: '{{ ansible_distribution }}'
-
- suseconnect_subscriptions:
- - {name: "sle-module-containers", state: enabled}
- - {name: "PackageHub", state: disabled}
- - {name: "sle-module-python3", state: enabled}
tasks:
- name: Register system and modules with SUSE Customer Center
+ vars:
+ suseconnect_base_product:
+ key: '{{ sles_registration_key }}'
+ product: '{{ ansible_distribution }}'
+
+ suseconnect_subscriptions:
+ - {name: "sle-module-containers", state: enabled}
+ - {name: "PackageHub", state: disabled}
+ - {name: "sle-module-python3", state: enabled}
ansible.builtin.include_role:
name: suse.linux_system_roles.suseconnect
@@ -74,18 +72,16 @@
- name: Deregister products from SCC
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- suseconnect_deregister: true
tasks:
- name: Deregister products from SUSE Customer Center
+ vars:
+ suseconnect_deregister: true
ansible.builtin.include_role:
name: suse.linux_system_roles.suseconnect
- suseconnect_deregister: true: instructs the system to deactivate its base subscription and remove it from the the SCC or SMT servers.
+ suseconnect_deregister: true: instructs the system to deactivate its base subscription and remove it from the SCC or SMT servers.
suseconnect_deregister is a Boolean datatype and accepts values false and true. When set to
true, it executes the necessary SUSEConnect command to deregister the system's base product subscription. Default is false.
diff --git a/concepts/ansible-role-systemd.xml b/concepts/ansible-role-systemd.xml
index 90f432400..664e22cde 100644
--- a/concepts/ansible-role-systemd.xml
+++ b/concepts/ansible-role-systemd.xml
@@ -37,20 +37,19 @@
- name: Deploy and start a systemd unit
hosts: managed_nodes
become: true
- gather_facts: true
- vars:
- systemd_unit_file_templates:
- - test.service.j2
- systemd_started_units:
- - item: test.service
- user: root
- - item: test1.service
- user: user1
- systemd_enabled_units:
- - test.service
tasks:
- name: Manage a systemd unit
+ vars:
+ systemd_unit_file_templates:
+ - test.service.j2
+ systemd_started_units:
+ - item: test.service
+ user: root
+ - item: test1.service
+ user: user1
+ systemd_enabled_units:
+ - test.service
ansible.builtin.include_role:
name: suse.linux_system_roles.systemd
diff --git a/concepts/ansible-role-timesync.xml b/concepts/ansible-role-timesync.xml
index e6c99ee37..3fd7bee39 100644
--- a/concepts/ansible-role-timesync.xml
+++ b/concepts/ansible-role-timesync.xml
@@ -38,20 +38,18 @@
- name: Manage timesync
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- timesync_ntp_servers:
- - hostname: ntp.example.com
- iburst: true
- - hostname: time.example.com
- iburst: true
- - hostname: sync.example.com
- iburst: true
tasks:
- name: Manage timesync
- ansible.builtin.include_role:
+ vars:
+ timesync_ntp_servers:
+ - hostname: ntp.example.com
+ iburst: true
+ - hostname: time.example.com
+ iburst: true
+ - hostname: sync.example.com
+ iburst: true
+ ansible.builtin.include_role:
name: suse.linux_system_roles.timesync
@@ -68,16 +66,14 @@ required string datatype and iburst is an
- name: Manage timesync in PTP domain 0, interface eth0
hosts: managed_nodes
become: true
- gather_facts: true
-
- vars:
- timesync_ptp_domains:
- - number: 0
- interfaces: [eth0]
tasks:
- name: Manage linuxptp
- ansible.builtin.include_role:
+ vars:
+ timesync_ptp_domains:
+ - number: 0
+ interfaces: [eth0]
+ ansible.builtin.include_role:
name: suse.linux_system_roles.timesync
diff --git a/concepts/deployment-pxe-introduction.xml b/concepts/deployment-pxe-introduction.xml
index 2cd358116..13a206009 100644
--- a/concepts/deployment-pxe-introduction.xml
+++ b/concepts/deployment-pxe-introduction.xml
@@ -54,10 +54,10 @@
&combustion;/&ignition; configuration on the TFTP server. For details, refer to .
-
+
Prepare the TFTP or HTTP server by adding the installation image and configuring the boot
- settings. For details refer to preparing installation files.
+ settings. For details refer to preparing installation files.
diff --git a/concepts/ha-sbd-what-is.xml b/concepts/ha-sbd-what-is.xml
index 6ee75af19..a16ab1d2b 100644
--- a/concepts/ha-sbd-what-is.xml
+++ b/concepts/ha-sbd-what-is.xml
@@ -19,19 +19,19 @@
- &sbd; (&sbd.long;) provides a node fencing mechanism without using an external power-off
- device. The software component (the &sbd; daemon) works together with a watchdog device to
- ensure that misbehaving nodes are fenced. &sbd; can be used in disk-based mode with shared
- block storage, or in diskless mode using only the watchdog.
+ &sbd; (&sbd.long; or Storage-Based Death) provides a node fencing mechanism without using
+ an external power-off device. The software component (the &sbd; daemon) works together with
+ a watchdog device to ensure that misbehaving nodes are fenced. &sbd; can be used in
+ disk-based mode with shared block storage, or in diskless mode using only the watchdog.
-
+
Disk-based &sbd; uses shared block storage to exchange fencing messages between the nodes.
It can be used with one to three devices. One device is appropriate for simple cluster setups,
but two or three devices are recommended for more complex setups or critical workloads.
-
- Diskless &sbd; fences nodes by using only the watchdog, without relying on a shared storage
- device. A node is fenced if it loses quorum, if any monitored daemon is lost and cannot be
+
+ Diskless &sbd; fences nodes by using only the watchdog, without a shared storage device.
+ A node is fenced if it loses quorum, if any monitored daemon is lost and cannot be
recovered, or if &pace; determines that the node requires fencing.
@@ -49,7 +49,7 @@
-
+ &sbd; device (disk-based &sbd;)
@@ -58,7 +58,7 @@
-
+ Messages (disk-based &sbd;)
@@ -80,7 +80,7 @@
Limitations and recommendations
-
+ Disk-based &sbd;
@@ -121,7 +121,7 @@
-
+ Diskless &sbd;
diff --git a/concepts/introduction-automated-installation-using-agama.xml b/concepts/introduction-automated-installation-using-agama.xml
index b952dc20b..d9729af81 100644
--- a/concepts/introduction-automated-installation-using-agama.xml
+++ b/concepts/introduction-automated-installation-using-agama.xml
@@ -27,8 +27,8 @@
This article describes how to use &agama; for automated and unattended installation of
&productname;. You can use JSON profiles describing different aspects
- of intended system, and &agama; installs accordingly. While not a fully backward compatible
- replacement for Auto&yast;, it simplifies the task of automated installation and provides
+ of the intended system, and &agama; installs accordingly. While not a fully backward compatible
+ replacement for &ay;, it simplifies the task of automated installation and provides
multiple clients for interactive and automated installation.
@@ -36,23 +36,23 @@
What is &agama;
- Agama as a service-based Linux installer capable of performing both interactive and
- unattended installations. You can provide &agama; a JSON profile file detailing the initial
+ Agama is a service-based Linux installer capable of performing both interactive and
+ unattended installations. You can provide &agama; with a JSON profile file detailing the initial
system state, such as user authentication, partitioning, networking and software selection.
- On receiving the profile and instruction for installation from one of its supported clients,
+ On receiving the profile and instructions for installation from one of its supported clients,
&agama; installs your target system accordingly. Users can interact with and control the
- installation process using &agama;'s web-based user interface, command-line interface and
+ installation process using &agama;'s Web-based user interface, command-line interface and
HTTP API, facilitating automation and integration into existing workflows.
While &agama; reuses many principles and internal components from previous SUSE installers
- like &yast; and Auto&yast;, and offers a high level of backwards compatibility for unattended
- installations, it is not a 100% compatible drop-in replacement for all Auto&yast; features.
+ like &yast; and &ay;, and offers a high level of backward compatibility for unattended
+ installations, it is not a 100% compatible drop-in replacement for all &ay; features.
&agama; focuses only on the installation process rather than being a general configuration
tool.
- &agama; server client architecture
+ &agama; client-server architecture
@@ -60,15 +60,19 @@
+
+ Diagram of &agama;'s client-server architecture showing three clients—CLI,
+ Web UI and others—connecting to the &agama; core via an HTTP API
+
- Why use &agama; for automated installation
+ Why use &agama; for automated installation?
- &agama;offers its installation service by exposing an HTTP API, which you can use
- interactively from a web-based interface and a command-line interface (CLI), or provide a
- JSON profile to &agama; for automated installation of a target system. Using the HTTP API you
+ &agama;offers its installation service through an HTTP API, which you can use
+ interactively from a Web-based interface and a command-line interface (CLI), or provide a
+ JSON profile to &agama; for automated installation of a target system. Using the HTTP API, you
can also integrate with custom scripts and deployment tools. The benefits of using Agama for
automated installation are as follows:
@@ -79,16 +83,16 @@
&agama; focuses on core installation tasks such as user authentication, network
configuration, storage setup and software installation, delegating further
- configuration to other tools such as &ansible; &salt;, &cockpit; or &openscap;.
+ configuration to other tools such as &ansible;, &salt;, &cockpit; or &openscap;.
- Profile based installation
+ Profile-based installation
You can define installation parameters for the target system in an easily readable and
- editable JSON or Jsonnet profile. Existing XML based Auto&yast; are also supported with
+ editable JSON or Jsonnet profile. Existing XML-based &ay; profiles are also supported with
some exceptions.
@@ -100,8 +104,8 @@
The profile allows detailed setup including user authentication, product registration,
network connections, storage (drives, partitions, LVM, RAID, encryption, resizing,
deleting), software selection by patterns and packages, localization (language,
- keyboard, timezone) and many other aspects of the target system that are not exposed in
- the graphical or web-based interface. This helps in a more granular control over the
+ keyboard, time zone) and many other aspects of the target system that are not exposed in
+ the graphical or Web-based interface. This provides a more granular level of control over the
installation parameters.
@@ -111,20 +115,20 @@
&agama; supports dynamic profiles using Jsonnet, injecting hardware information that
- can be processed at runtime. This avoids reliance on Auto&yast;'s rules or ERB for
+ can be processed at runtime. This avoids reliance on &ay;'s rules or ERB for
dynamic configurations.
- Auto&yast; compatibility
+ &ay; compatibility
- &agama; offers a mechanism to reuse existing AutoYaST profiles to a great extent. It
+ &agama; offers a mechanism to reuse existing &ay; profiles to a great extent. It
supports some dynamic features such as pre-scripts, rules/classes, and Embedded Ruby
- (ERB) when using Auto&yast; profiles. A legacyAutoyastStorage
- section allows direct use of the Auto&yast; profile's partitioning
- section for backwards compatibility.
+ (ERB) when using &ay; profiles. A legacyAutoyastStorage
+ section allows direct use of the &ay; profile's partitioning
+ section for backward compatibility.
@@ -143,11 +147,11 @@
The typical way to start an unattended installation from an ISO image is using the
- inst.auto kernel boot option, pointing to the profile URL or it's
+ inst.auto kernel boot option, pointing to the profile URL or its
location in the hard drive. You can also use the agama profile
import command from the &agama; CLI to load a profile, followed by the
- agama install command. The CLI also allows inspection, modification,
- validation of the profile, and subsequent monitoring of the installation process.
+ agama install command. The CLI also allows inspection, modification
+ and validation of the profile, and subsequent monitoring of the installation process.
diff --git a/concepts/packages_lifecycle_ansible.xml b/concepts/packages_lifecycle_ansible.xml
new file mode 100644
index 000000000..5868b9797
--- /dev/null
+++ b/concepts/packages_lifecycle_ansible.xml
@@ -0,0 +1,84 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Ansible update strategy
+
+
+
+
+ Ansible is an open source IT automation engine that simplifies the process of provisioning, configuration management, application deployment, and orchestration by using a human-readable, agentless language.
+
+
+
+
+ The &ansible; stack follows the agile lifecycle category. &ansible; packages are updated
+ as described below:
+
+
+ Ansible components release frequency
+
+
+
+
+
+
+
+
+ Component
+ Packages
+ Release frequency
+
+
+
+
+
+ Ansible interpreter
+ ansible, ansible-core
+ Twice a year (typically May and November)
+
+
+ Linux System Roles
+ ansible-linux-system-roles
+ Frequently
+
+
+ SAP roles & playbooks
+ ansible-sap-install,
+ ansible-sap-infrastructure,
+ ansible-sap-operations, ansible-sap-playbooks
+
+ Irregularly
+
+
+ Unsupported SAP roles
+ ansible-sap-launchpad
+
+ Irregularly
+
+
+
+
+
+ On &sles;, the Ansible interpreter and Linux System Roles are supported. On &sles4sapa;, SAP
+ roles & playbooks are also supported. The support covers only the roles and playbooks that are
+ delivered by &suse;.
+
+
+
diff --git a/concepts/packages_lifecycle_cockpit.xml b/concepts/packages_lifecycle_cockpit.xml
new file mode 100644
index 000000000..efe171684
--- /dev/null
+++ b/concepts/packages_lifecycle_cockpit.xml
@@ -0,0 +1,53 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ &cockpit; update strategy
+
+
+
+
+ &cockpit; is a Web-based graphical interface that enables you to manage
+ most administration tasks from one place. This section explains how often
+ &cockpit; is updated.
+
+
+
+
+ &cockpit; is updated and supported according to the following rules:
+
+
+
+
+ Each &suselinux; minor release comes with the latest &cockpit; version.
+
+
+
+
+ Support for this version continues throughout the entire lifecycle of the minor release,
+ with any necessary fixes provided through backports.
+
+
+
+
+To make use of upstream improvements, &suse; may upgrade to a newer version when reasonable (for example, due to bug fixes, security updates, or usability improvements), especially in LTS situations.
+
+
+
+
diff --git a/concepts/packages_lifecycle_desktop_components.xml b/concepts/packages_lifecycle_desktop_components.xml
index 544126e8f..1d2eee288 100644
--- a/concepts/packages_lifecycle_desktop_components.xml
+++ b/concepts/packages_lifecycle_desktop_components.xml
@@ -86,7 +86,7 @@ The Qt 6 is delivered. The initial version is Qt 6.9.
KDE
-Not a standard &sle; delivery; available only in PackageHub 16.
+Not a standard &suselinux; delivery; available only in PackageHub 16.
diff --git a/concepts/packages_lifecycle_java.xml b/concepts/packages_lifecycle_java.xml
new file mode 100644
index 000000000..9217aa51b
--- /dev/null
+++ b/concepts/packages_lifecycle_java.xml
@@ -0,0 +1,73 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Java update strategy
+
+
+
+
+ On &suselinux; 16, Java is delivered and supported in the following versions:
+
+
+
+
+
+
+ Java 17: supported until December 2027
+
+
+
+
+ Java 21: supported until October 2031
+
+
+
+
+ Java 25: support date to be confirmed after this version is
+ released, expected support date is October 2033
+
+
+
+
+For the available versions, the following criteria apply:
+
+
+
+
+ Concurrent installation of different versions is supported
+
+
+
+
+ The Java versions are maintained independently of the &suselinux; minor version release cycle
+
+
+
+
+ Each version is supported throughout the general support and LTS phase
+
+
+
+
+ Java packages can be updated in an agile manner
+
+
+
+
diff --git a/concepts/packages_lifecycle_mariadb.xml b/concepts/packages_lifecycle_mariadb.xml
new file mode 100644
index 000000000..7d53585ce
--- /dev/null
+++ b/concepts/packages_lifecycle_mariadb.xml
@@ -0,0 +1,40 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ MariaDB update strategy
+
+
+
+
+ MariaDB is a high-performance, open source relational database management system (RDBMS) that was created as a community-developed fork of MySQL to ensure the software remains free and accessible under the GPL license.
+
+
+
+
+ MariaDB follows the balanced life cycle—only LTS MariaDB releases are supported.
+
+
+ Each &suselinux; minor version gets a new version of MariaDB. This version
+ is supported for the General Support or LTS period. The starting version of
+ MariaDB is 11.4.5.
+
+
+ Minor version updates, aligned with upstream releases, are provided exclusively for the mariadb and mariadb-connector-c packages.
+
diff --git a/concepts/packages_lifecycle_nodejs.xml b/concepts/packages_lifecycle_nodejs.xml
new file mode 100644
index 000000000..1ed00ba4e
--- /dev/null
+++ b/concepts/packages_lifecycle_nodejs.xml
@@ -0,0 +1,67 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Node.js update strategy
+
+
+
+
+ Node.js is an open source, cross-platform JavaScript runtime environment that allows developers to build and run server-side applications outside of a web browser. It is built on Google Chrome's V8 engine and uses an event-driven, non-blocking I/O model, making it highly efficient for creating scalable network applications.
+
+
+
+
+ On &suselinux; 16, node.js is updated and supported according to the following rules:
+
+
+
+
+ The node.js version is kept in sync with the upstream LTS version.
+
+
+
+
+ Each &suselinux; 16 minor release has the latest Active LTS version,
+ starting with nodejs 22, and comes with a default version of node.js.
+
+
+
+
+All Node.js minor releases are generally available after security fixes or on request.
+
+
+
+
+ Different node.js versions can be installed in parallel.
+
+
+
+
+ Versions in a specific &suselinux; minor release are supported until the end of LTS.
+
+
+
+
+ To always use the most recent recommended version available in the repository, applications should use BuildRequires: nodejs >= 22.
+
+
+
+
diff --git a/concepts/packages_lifecycle_perl.xml b/concepts/packages_lifecycle_perl.xml
new file mode 100644
index 000000000..8f1116b0b
--- /dev/null
+++ b/concepts/packages_lifecycle_perl.xml
@@ -0,0 +1,33 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Perl update strategy
+
+
+
+
+ Perl is a high-level, general-purpose programming language originally developed for text manipulation that has since evolved into a versatile tool used for system administration, Web development, and network programming.
+
+
+
+
+ The Perl update strategy is considered stable and no new version is expected to be released.
+
+
diff --git a/concepts/packages_lifecycle_php.xml b/concepts/packages_lifecycle_php.xml
new file mode 100644
index 000000000..73bf0f11f
--- /dev/null
+++ b/concepts/packages_lifecycle_php.xml
@@ -0,0 +1,38 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ PHP update strategy
+
+
+
+
+ PHP is an open source server-side scripting language designed specifically for Web development to create dynamic, interactive content that interacts with databases.
+
+
+
+
+ PHP packages follow the balanced life cycle. The starting version is 8.4 and a new version is shipped with each minor version
+ of &suselinux; 16. Patches are delivered even within the minor &suselinux; release.
+
+
+ Each version is supported for 4 years – 1.5 years on top of the 2.5 years provided by upstream.
+
+
+
diff --git a/concepts/packages_lifecycle_postgresql.xml b/concepts/packages_lifecycle_postgresql.xml
new file mode 100644
index 000000000..75d608908
--- /dev/null
+++ b/concepts/packages_lifecycle_postgresql.xml
@@ -0,0 +1,38 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ PostgreSQL update strategy
+
+
+
+
+ PostgreSQL is an open-source object-relational database management system (ORDBMS) known for its reliability and extensive support for advanced data types and complex queries.
+
+
+
+
+ On &suselinux; 16, all PostgreSQL versions maintained by the upstream community are supported.
+ When a new version is released by upstream, it is added to each minor &suselinux; version under
+ general or LTS support.
+
+
+ Once a PostgreSQL version reaches its end of life, it remains accessible within the repositories but will no longer receive security patches or bug fixes.
+
+
diff --git a/concepts/packages_lifecycle_python.xml b/concepts/packages_lifecycle_python.xml
new file mode 100644
index 000000000..e499096a9
--- /dev/null
+++ b/concepts/packages_lifecycle_python.xml
@@ -0,0 +1,191 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Python update strategy
+
+
+
+
+ The primary Python interpreter with its stack is 3.13 in &suselinux; &productnumber;.
+
+
+
+
+ Python interpreter and stack support lifecycles
+
+ The support lifecycles of the Python interpreter are the following:
+
+
+
+ Short-term support interpreter
+
+
+ It is supported for 2 years. The support lifecycle applies also to basic packages:
+ setuptools, venv, pip, wheel and pipx.
+
+
+
+
+ Primary Python
+
+
+ The OS's main Python. It is supported for 4 years and includes the interpreter and its stack,
+ /usr/bin/python3 and all packages with the python3-
+ prefix.
+
+
+
+
+ Legacy Python
+
+
+It is supported for 4 years and includes the interpreter and stack but not the
+python3- packages. A legacy interpreter is a former primary Python. Short-term interpreters, by contrast, follow a different lifecycle and are becoming legacy.
+
+
+
+
+
+
+ Python release cycle
+
+ The starting version of Python in &suselinux; 16 is Python 3.13. &suse; plans to release every
+ odd version of Python and provide the LTS for up to 8 years.
+
+
+ The latest Python interpreter is delivered with each minor release with only short-term support. The
+ outdated Python version is migrated to the legacy mode and is supported for another 4 years.
+
+
+ The following table shows probable versions of Python to be delivered with particular
+ &suselinux; versions. The exact version will depend on the upstream community.
+
+
+ Python versions delivered per &suselinux; minor release
+
+
+
+
+
+
+
+
+
+
+
+
+ Python version
+
+
+ &suselinux; minor release
+ 3.13
+ 3.14
+ 3.15
+ 3.16
+ 3.17
+ 3.18
+ 3.19
+
+
+
+
+
+ &suselinux; 16.0
+ The primary Python stack
+
+
+
+
+
+
+
+
+
+ &suselinux; 16.1
+ The primary Python stack
+ As the short-term support interpreter
+
+
+
+
+
+
+
+
+ &suselinux; 16.2
+ The primary Python stack
+
+ As the short-term support interpreter
+
+
+
+
+
+
+
+ &suselinux; 16.3
+ The legacy Python
+
+ The primary Python
+ As the short-term support interpreter
+
+
+
+
+
+
+ &suselinux; 16.4
+
+
+ The primary Python
+
+ As the short-term support interpreter
+
+
+
+
+
+ &suselinux; 16.5
+
+
+ The legacy Python
+
+ The primary Python
+ As the short-term support interpreter
+
+
+
+
+ &suselinux; 16.6
+
+
+
+
+ The primary Python
+
+ As the short-term support interpreter
+
+
+
+
+
+
+
+
diff --git a/concepts/packages_lifecycle_toolchain.xml b/concepts/packages_lifecycle_toolchain.xml
index 6740c1f89..94ce2eac1 100644
--- a/concepts/packages_lifecycle_toolchain.xml
+++ b/concepts/packages_lifecycle_toolchain.xml
@@ -33,15 +33,14 @@
GNU C library (glibc)
The initial glibc version is 2.40. The package is
- updated with each minor &slea; release if there are reasons for changes to the package (for
+ updated with each minor &suselinux; release if there are reasons for changes to the package (for
example feature requests, performance tuning and so on).
Package updates provide backward compatibility for dynamic linking, allowing programs built
- on previous &slea; 16 releases to run. On the contrary, symbols deprecated in the
+ on previous &suselinux; 16 releases to run. On the contrary, symbols deprecated in the
upstream glibc version will not be declared for the compiler and not
- available for link editing (static linking). Such cases, when source level and static linking
- backward compatibility is not guaranteed, are properly documented.
+ available for link editing (static linking).
@@ -50,25 +49,25 @@
Developers of user-space applications can use the supported GNU Compiler Collection (GCC) C
and C++ built-in compilers. Compilers for other languages, cross-compilers and accelerator
- offloading compilers are not available on &slea; from standard repositories, but developers
+ offloading compilers are not available on &suselinux; from standard repositories, but developers
can install them from Package Hub with community support.
- The initial major version in &slea; 16.0 and 16.1 is GCC 15. Later &slea; releases will introduce the
+ The initial major version in &suselinux; 16.0 and 16.1 is GCC 15. Later &suselinux; releases will introduce the
tick-tock model:
- Each even minor release of &slea; (the tick release) introduces a new major version of
- GCC. This GCC version is supported during the LTS for the minor version that introduced
- it and also for the next &slea; minor version. For example, if GCC 17 is introduced in
- &slea; 16.2, it will be supported until the end of LTS for &slea; 16.3.
+ Each even minor release of &suselinux; (the tick release) introduces a new major version of GCC as
+ the default compiler. This GCC version is supported during the LTS for the minor version that
+ introduced it and also for the next &suselinux; minor version. For example, if GCC 17 is introduced
+ in &suselinux; 16.2, it will be supported until the end of LTS for &suselinux; 16.3.
- Each odd minor release of &slea; (the tock release) comes with a new non-default major
+ Each odd minor release of &suselinux; (the tock release) comes with a new non-default major
version of GCC. As this version is not the default one, you must explicitly
invoke the binaries gcc-x, g++-x and
gfortran-x to use it. These non-default versions are supported for 24 months.
@@ -149,7 +148,11 @@
For C++, the most recent supported version is ISO/IEC 14882:2017 (known as C++17) with GNU extensions.
- &suse; also provides unsupported packages with compilers for Fortran, Ada and Go (gcc-go).
+ For Fortran, we support versions up to Fortran 95 (ISO/IEC 1539:1997) with specific features of
+ later standard versions as described in the manual.
+
+
+ &suse; also provides unsupported packages with compilers for Ada and Go (gcc-go).
@@ -161,13 +164,13 @@
The compiler for kernel modules is not intended for general use. The kernel module compiler
- may also be dropped in a future minor release of &slea;.
+ may also be dropped in a future minor release of &suselinux;.
The build compiler
-There is a default compiler that is used internally to build &slea; 16 packages in the version GCC
+There is a default compiler that is used internally to build &suselinux; 16 packages in the version GCC
13. The build compiler is provided as an unsupported package in PackageHub.
@@ -180,11 +183,11 @@ There is a default compiler that is used internally to build &slea; 16 packages
GCC and C++ runtime libraries
GCC runtime libraries (libgcc, libstdc++) are updated to the
-versions of a new major version of GCC on a yearly basis in all &slea; minor releases under LTS.
+versions of a new major version of GCC on a yearly basis in all &suselinux; minor releases under LTS.
The runtime libraries are updated during maintenance updates.
- The runtime libraries are fully supported during the general support and LTS of each &slea;
+ The runtime libraries are fully supported during the general support and LTS of each &suselinux;
minor version.
@@ -196,7 +199,7 @@ The runtime libraries are updated during maintenance updates.
The GNU Binutils
- The GNU Binutils are upgraded to the latest upstream version in all &slea; 16 minor releases
+ The GNU Binutils are upgraded to the latest upstream version in all &suselinux; 16 minor releases
under general support of LTS.
@@ -204,7 +207,7 @@ The runtime libraries are updated during maintenance updates.
The GNU project debugger
- GDB is updated to the newest major version on all &slea; 16 minor releases under general support
+ GDB is updated to the newest major version on all &suselinux; 16 minor releases under general support
or LTS. However, this means that certain functionality may be removed from the package.
@@ -213,7 +216,7 @@ The runtime libraries are updated during maintenance updates.
LLVM
LLVM is available for use exclusively with MESA. Any other use of LLVM is not supported. Front-ends like Clang are not provided on &slea;. You
+ role="bold">not supported. Front-ends like Clang are not provided on &suselinux;. You
may get them only from a community-supported repository.
@@ -222,12 +225,12 @@ The runtime libraries are updated during maintenance updates.
Compatibility and deprecation policy
&suse; maintains backward dynamic-linking compatibility for glibc and the C++
- compiler runtime library. This means that a binary built on an earlier minor version of &slea;
+ compiler runtime library. This means that a binary built on an earlier minor version of &suselinux;
16 runs correctly on a later minor release.
Features deprecated by upstream are removed either from newer major versions of compilers or
- from all toolchain components in later &slea; minor versions. In the case of
+ from all toolchain components in later &suselinux; minor versions. In the case of
glibc, the deprecated symbols are then removed from header files and are
not available for link editing. Therefore, the code using these symbols will no longer compile or (statically) link.
diff --git a/concepts/packages_lifecycle_types.xml b/concepts/packages_lifecycle_types.xml
index a2035ff13..a6efda812 100644
--- a/concepts/packages_lifecycle_types.xml
+++ b/concepts/packages_lifecycle_types.xml
@@ -23,7 +23,7 @@
- On &sle; 16, component packages are sorted into different lifecycle categories. This section describes
+ On &suselinux; 16, component packages are sorted into different lifecycle categories. This section describes
the criteria for such a sorting.
@@ -40,7 +40,7 @@
Stable
- Packages that are marked as stable* (also called conservative) are those that do not deliver a disruptive change
+ Packages that are marked as stable (also called conservative) are those that do not deliver a disruptive change
while a customer is on any of the 16 minor versions. During the upgrade to another minor
version, the package version may change but the newer version does not introduce incompatible behavior. Customers expect to have LTS on
@@ -203,9 +203,10 @@ kernel: the kernel is updated with each minor release
- GCC: a new version of the compiler is released with each minor release with a
- sliding window of 2. However, the libraries libgcc and
- libstdc++ are categorized as stable.
+ GCC: a new version of the compiler is released with each minor release. Those introduced in odd minor versions
+ are not used by default and have a shorter support period. See
+ for details. However, the libraries
+ libgcc and libstdc++ are categorized as stable.
diff --git a/concepts/packages_lifecycle_valkey.xml b/concepts/packages_lifecycle_valkey.xml
new file mode 100644
index 000000000..a783c15f2
--- /dev/null
+++ b/concepts/packages_lifecycle_valkey.xml
@@ -0,0 +1,66 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Valkey update strategy
+
+
+
+
+ Valkey is a high-performance data structure server that primarily serves key/value workloads. It supports a wide range of native structures and an extensible plug-in system for adding new data structures and access patterns.
+
+
+
+
+ On &suselinux; 16, Valkey is updated and supported according to the following rules:
+
+
+
+
+ The Valkey version is kept in sync with upstream
+
+
+
+
+ Each &suselinux; 16 minor release has the latest available version
+
+
+
+
+Valkey is supported for all &suselinux; 16 minor releases under general support and LTS
+
+
+
+
+ Backward compatible Valkey minor releases are published promptly after the corresponding upstream release
+
+
+
+
+ Patch version updates are delivered continuously to &suselinux; minor releases as soon as the
+ updates are released
+
+
+
+
+ Security fixes are backported if the upstream project does not issue a new release incorporating the fix
+
+
+
+
diff --git a/concepts/sap-for-ansible-intro.xml b/concepts/sap-for-ansible-intro.xml
new file mode 100644
index 000000000..d63411b3c
--- /dev/null
+++ b/concepts/sap-for-ansible-intro.xml
@@ -0,0 +1,35 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Introduction to Ansible Automation for &sles4sap; 16
+
+
+
+
+
+ Ansible Automation is required for SAP landscapes because manually managing the complex, critical and often vast SAP environments is time-consuming, error-prone and unable to meet
+ modern demands for agility and high availability. Leveraging Ansible allows organizations to implement Infrastructure-as-Code (IaC) principles for SAP, ensuring speed, consistency, and repeatability in
+ vital tasks like provisioning new systems, patching, configuration management and performing complex operational procedures.
+
+
+
+ This automation minimizes the risk of human error, enables high availability, significantly accelerates change management and frees up skilled SAP Basis administrators to focus on strategic, high-value work rather than repetitive and manual operations.
+&suse; provides you with a powerful set of Ansible collections and playbooks designed specifically for &sles4sap; 16.
+
\ No newline at end of file
diff --git a/concepts/selinux-modes.xml b/concepts/selinux-modes.xml
index 565a01ea5..dae8fbe52 100644
--- a/concepts/selinux-modes.xml
+++ b/concepts/selinux-modes.xml
@@ -42,7 +42,9 @@
-The default in &productnameshort; &productnumber; is enforcing mode.
+
+The default in &productnameshort; &productnumber; is enforcing mode.
+ In &productname; &productnumber;, &selnx; is enabled in permissive mode by default if &sap; patterns are installed.
For information about switching between &selnx; modes, refer to
.
diff --git a/concepts/selinux-policy.xml b/concepts/selinux-policy.xml
index cd7a44d57..533683d85 100644
--- a/concepts/selinux-policy.xml
+++ b/concepts/selinux-policy.xml
@@ -39,4 +39,8 @@
This allows the administrator to customize policies for different parts of the
system.
+ About &selnx; for &productname; &productnumber;
+ The selinux-policy-sapenablement package provides &selnx; policy changes for running &sap;.
+ Currently, it applies the required settings but still sets &selnx; to permissive mode.
+ This package is installed by default.
diff --git a/concepts/sle16.xml b/concepts/sle16.xml
index 44bfb8b54..06a1f6bc0 100644
--- a/concepts/sle16.xml
+++ b/concepts/sle16.xml
@@ -116,7 +116,7 @@
1 To improve overall performance, &slsa; 16 also delivers some
shared libraries optimized for x86-64-v3. These libraries will be installed and used
automatically on systems supporting v3. You can tell a package was optimized if its name
- includes the string x86_42_v3.
+ includes the string x86-64-v3.
2
@@ -140,7 +140,7 @@
- &slea; 16 introduces a new naming schema for versions, using minor releases like 16.0,
+ &suselinux; 16 introduces a new naming schema for versions, using minor releases like 16.0,
16.1, instead of service packs. Minor releases are scheduled annually and always in
November. The initial release, 16.0, is planned for November 2025.
@@ -165,7 +165,7 @@
Each &slsa; 16 minor release has 24 months of general support, giving you 12 months
overlap with the next minor release. This means &sls; doubles the time to upgrade at
no extra cost. The last minor release 16.6 will have 48 months of general support to
- complete a 10 year lifecycle. General support is expected to end in in November 2035.
+ complete a 10 year lifecycle. General support is expected to end in November 2035.
Additional Long Term Support (LTS) is available for 3 years for all service packs,
with LTS and Extreme LTS beyond 2040.
&sles4sap; 16 has general support for 2 years plus 3 years extended support.
@@ -185,7 +185,7 @@
- In &slea; 16, systems management sees significant changes:
+ In &suselinux; 16, systems management sees significant changes:
@@ -210,7 +210,7 @@
- These changes in &slea; 16 aim to provide better integration with &salt; and &ansible;,
+ These changes in &suselinux; 16 aim to provide better integration with &salt; and &ansible;,
allow remote installation via a Web browser, and offer different interfaces (Web, CLI,
HTTP API) for controlling the process.
@@ -228,7 +228,7 @@
- &slea; 16 ships &selnx; with policies for over 400 modules by default, confining almost
+ &suselinux; 16 ships &selnx; with policies for over 400 modules by default, confining almost
the whole system. Parts of the system can run unconfined if desired.
&sles4sap; 16 has improved workload isolation with &selnx;, providing isolation for data
and data flows, and for containers.
@@ -247,7 +247,7 @@
- &slea; 16 focuses on a single stack using &nm;.
+ &suselinux; 16 focuses on a single stack using &nm;.
@@ -266,7 +266,7 @@
- &slea; 16 uses ready-to-run images for virtual machines or public clouds, and
+ &suselinux; 16 uses ready-to-run images for virtual machines or public clouds, and
manual/automated deployment with &agama;. The &agama; installer is accessible via a Web
interface, command line or HTTP API. &agama; allows repeated configuration import. It uses a
standard live medium with dracut, &systemd;, and NetworkManager and
@@ -287,7 +287,7 @@
- &slea; 16 completes the transition from SysV init and uses &systemd; native units
+ &suselinux; 16 completes the transition from SysV init and uses &systemd; native units
exclusively.
@@ -304,7 +304,7 @@
- In &slea; 16, defaults are in /usr and /etc is
+ In &suselinux; 16, defaults are in /usr and /etc is
for customization. This separation of vendor-provided defaults and administrator
customization allows easier updates and — optionally — read-only
/usr file system.
@@ -324,7 +324,7 @@
- In &slea; 16, /tmp uses the /tmp uses the tmpfs file system, which stores data in physical RAM
and/or swap space, making it very fast for read and write operations.
@@ -341,7 +341,7 @@
Do not oversize /tmp
Oversizing tmpfs file system instances
- beyond the the sum of physical RAM and swap space of a system can lead to deadlock
+ beyond the sum of physical RAM and swap space of a system can lead to deadlock
situations where the system becomes unresponsive due to insufficient available memory.
@@ -360,7 +360,7 @@
- &slea; 16 uses a minimal &gnome; desktop. This helps minimize the security surface.
+ &suselinux; 16 uses a minimal &gnome; desktop. This helps minimize the security surface.
&sled; is not planned for 16.0.
@@ -466,7 +466,7 @@
Removed technology
- &slea; 16 removes certain technologies to reduce the OS footprint and the security attack
+ &suselinux; 16 removes certain technologies to reduce the OS footprint and the security attack
vector:
@@ -500,8 +500,8 @@
- Virtualization: &xen; was removed in favor of &kvm;. You can no longer run &slea; 16 as
- &xen; host or as paravirtualized guest (PV). Running &slea; 16 as fully virtualized &xen;
+ Virtualization: &xen; was removed in favor of &kvm;. You can no longer run &suselinux; 16 as
+ &xen; host or as paravirtualized guest (PV). Running &suselinux; 16 as fully virtualized &xen;
guest (HVM) or using hardware virtualization features (PVH) is still possible.
diff --git a/concepts/supportconfig-overview.xml b/concepts/supportconfig-overview.xml
index 67956df11..366b6c96b 100644
--- a/concepts/supportconfig-overview.xml
+++ b/concepts/supportconfig-overview.xml
@@ -36,7 +36,8 @@
The command-line tool is provided by the package supportutils, which is installed by default.
If the package is not installed, install it with:
- &prompt.sudo;zypper install supportutils
+ &prompt.sudo;zypper install supportutils
+ &prompt.sudo;transactional-update pkg in supportutils
The supportconfig tool can integrate plug-ins that run
automatically each time the command is executed. Which plug-ins are available on your system, depends on the installed packages. The plug-ins
diff --git a/concepts/systemd-securing.xml b/concepts/systemd-securing.xml
index 31ec4a133..36a426d5d 100644
--- a/concepts/systemd-securing.xml
+++ b/concepts/systemd-securing.xml
@@ -14,7 +14,7 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- Secure &systemd; services
+ Securing &systemd; services
Linux increases its security by separating privileges between individual components of the
@@ -30,23 +30,193 @@
them from certain privileges that normal users are allowed to use.
-
+
-
- How does securing services with &systemd; work?
-
- There are several methods to secure processes and applications that you can use
- simultaneously. For example, confining with &selnx; or &aa; is
- recommended. &systemd; can apply additional restrictions to local services by using
- technologies included in the kernel. These restrictions are activated by adding specific
- options to the &systemd; service definition and restarting the service.
-
-
-
- Benefits of securing services
+
+ Why is securing &systemd; services important?
Securing &systemd; services increases the security of the whole operating system and protects
- sensitive data contained on its file system.
+ sensitive data contained on its file system. With &systemd;, you can configure your system in many ways.
+ &systemd; runs as the first process on boot (PID1) so it has a lot of power on your Linux environment.
+ &systemd; can apply additional restrictions to local services by using technologies included in the kernel.
+ These restrictions are activated by adding specific options to the &systemd; service definition and restarting the service.
+ &systemd; has a command-line tool systemd-analyze security. This command analyzes the services and checks
+ whether the services use the available security options.
+
+
+ What is the systemd-analyze security command?
+
+ The command analyzes the security and sandboxing settings of the specified service units.
+ A detailed analysis of the security settings is executed and displayed.
+ If a service unit is not specified, all currently loaded, long-running service units are inspected and the results are displayed in a terse table.
+
+ Upon checking the security settings, the command assigns a numeric value, also known as exposure level.
+ This value depends on how important a setting is. It then calculates the overall exposure level for the entire unit. This value ranges
+ from 0.0 to 10.0, which is an indicator of how exposed a service is security-wise.
+ High exposure levels indicate that the service might benefit from additional security settings.
+ Low exposure levels indicate tight security restrictions.
+
+
+
+ Using the systemd-analyze security command
+
+ Use the systemd-analyze security command to analyze the security settings of a &systemd; service.
+ The security option analyzes the security and the sandboxing settings of one or more specified services.
+
+
+ Create a &systemd; service in the /etc/systemd/system directory.
+
+ Reload the service files to include the new service:
+ &prompt.sudo; systemctl daemon-reload
+
+ Start, enable and check the status of the service:
+ &prompt.sudo; systemctl start SERVICE_NAME
+ &prompt.sudo; systemctl enable SERVICE_NAME
+ &prompt.sudo; systemctl status SERVICE_NAME
+
+
+ Analyze the security settings of the service:
+ &prompt.sudo; systemd-analyze security SERVICE_NAME
+ For example:
+ &prompt.sudo; systemd-analyze security test.service
+ NAME DESCRIPTION EXPOSURE
+ ✗ PrivateNetwork= Service has access to the host's network 0.5
+ ✗ User=/DynamicUser= Service runs as root user 0.4
+ ✗ DeviceAllow= Service has no device ACL
+ ...
+ → Overall exposure level for test.service: 9.6 UNSAFE 😨
+
+
+
+
+
+ How to improve the overall exposure
+ If you get 9.6 UNSAFE, you can use the [Service] section of the service definition file to add any of the following options. For example:
+
+ [Service]
+ NoNewPrivileges=yes
+ PrivateTmp=yes
+ PrivateNetwork=yes
+ InaccessibleDirectories=/home
+ .....
+
+
+
+ NoNewPrivileges=yes
+
+
+ New privileges are not required.
+
+
+
+
+ PrivateTmp=yes
+
+
+ Private directory for temporary files. This option provides the service with a private /tmp isolated from
+ the host system's /tmp. The shared host /tmp
+ directory is a major source of security problems, such as symlink attacks and DoS
+ attacks via /tmp temporary files.
+
+
+
+
+ PrivateNetwork=yes
+
+
+ This option isolates the service and its processes from networking. This prevents
+ external network requests from reaching the protected service. Be aware that certain
+ services require the network to be operational.
+
+
+
+
+ InaccessibleDirectories=/home
+
+
+ This option makes the specified directories inaccessible to the service. This option
+ narrows the range of directories that can be read or modified by the service, for
+ example, to secure users' private files.
+
+
+
+
+ ReadOnlyDirectories=/var
+
+
+ This option makes the specified directories inaccessible for writing to the service. The
+ example configuration makes the whole tree below /var read-only.
+ This option prevents the service from damaging the system files.
+
+
+
+
+ CapabilityBoundingSet=CAP_CHOWN CAP_KILL
+
+
+ This option restricts the kernel capabilities that a service can retain. In the example
+ above, only the CAP_CHOWN and CAP_KILL capabilities
+ are retained by the service, and the service and any processes it creates cannot obtain
+ any other capabilities, not even via setuid binaries.
+
+
+ The pscap command tool
+
+ To easily identify which processes on your system retain which capabilities, use the
+ pscap command tool from the libcap-ng-utils package.
+
+
+
+
+ The ~ prefix inverts the meaning of the option. Instead of
+ listing all capabilities that the service retains, you can list the ones it does not
+ retain:
+
+ ...
+ [Service]
+ CapabilityBoundingSet=~CAP_SYS_PTRACE
+ ...
+
+
+
+
+
+ LimitNPROC=1, LimitFSIZE=0
+
+
+ You can use resource limits to apply security limits on services.
+ Two of them can disable specific operating system features:
+ disables process forking, while
+ disables creating non-empty files on the file system.
+
+
+
+
+ DeviceAllow=/dev/null rw
+
+
+ This option limits access to /dev/null, disallowing access to any
+ other device nodes.
+
+
+
+
+ These are some options you can use.
+
+ NoNewPrivileges=yes option example
+ This example shows a basic service configured to run as a specific user with privilege escalation strictly disabled.
+
+[Unit]
+Description=Simple Secure Service
+
+[Service]
+# Run as a non-root user
+User=john
+Group=john
+
+# Disable gaining new privileges (setuid, setgid, etc.)
+NoNewPrivileges=yes
+
-
+
diff --git a/concepts/systemd-targets.xml b/concepts/systemd-targets.xml
index 4d5fcece8..aadfa125f 100644
--- a/concepts/systemd-targets.xml
+++ b/concepts/systemd-targets.xml
@@ -15,22 +15,26 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- &systemd; targets
+ Understanding &systemd; targets
&systemd; uses units and targets. A &systemd; unit defines a service or action on the system, which consists of a name, type, and configuration file.
- A &systemd; target combines several units and defines which services have to be started to reach the target.
+
+
+
+
+What are &systemd; targets?
+
+ A &systemd; target combines several units and defines which services have to be started to reach the target.
On a server, for example, this is a state where the network is running and multiple users can log in.
These files are identified by the suffix .target.
Similar to unit files, different targets may be nested via dependencies. For example, multi-user.target requires (among others) the targets
that set up login and user session services.
-
-
-
+
Common &systemd; targets:
@@ -87,7 +91,121 @@
- For more information on &systemd; targets, refer to man 5 systemd.target
- and man 7 systemd.special.
+
+
+Setting up a &systemd; target
+To set up a &systemd; target, you need to create a new target unit file and define any required dependencies.
+ A target is a group of unit files that manages the state of services. A target can be used to control groups of services
+ or to define system states.
+
+
+ Create a new file with the .target extension in /etc/systemd/system/.
+ For example:
+ &prompt.sudo; vi /etc/systemd/system/test.target
+
+
+ Define the target with basic configuration.
+ For example:
+[Unit]
+Description= Test target
+Requires=multi-user.target
+After=multi-user.target
+
+[Install]
+WantedBy=multi-user.target
+
+
+
+
+ Requires: specifies that multi-user.target must be reached
+ for test.target to be active.
+
+
+
+ After: specifies that this target starts after the multi-user.target.
+
+
+
+
+ WantedBy: specifies that this target should be part of multi-user.target.
+
+
+
+
+
+ Set permissions for the target file.
+ For example:
+ &prompt.sudo; chmod 644 /etc/systemd/system/test.target
+
+
+ Link the required services to the target file. Create a .wants directory and then create
+ symbolic links to the services you want to include in your custom target.
+ For example:
+ &prompt.sudo; mkdir -p /etc/systemd/system/test.target.wants/
+ &prompt.sudo; ln -s /etc/systemd/system/test.service /etc/systemd/system/test.target.wants/
+
+
+
+ Reload &systemd;.
+ For example:
+ &prompt.sudo; systemctl daemon-reload
+
+ Start and enable the target.
+ For example:
+ &prompt.sudo; systemctl start test.target
+ &prompt.sudo; systemctl enable test.target
+
+
+ Verify the status of the custom target.
+ For example:
+ &prompt.sudo; systemctl status test.target
+
+
+ Verify all the units that are part of this target.
+ For example:
+ &prompt.sudo; systemctl list-dependencies test.target
+
+ Your custom &systemd; target is set up and configured.
+
+ You can use the following common configuration options:
+
+
+
+ Description: A human-readable description of the target.
+
+
+
+
+ Documentation: URIs pointing to documentation.
+
+
+
+ AllowIsolate:
+ If set to true, this unit can be used with the systemctl isolate command.
+
+
+
+
+ Wants:
+ Units that can be started but are not mandatory.
+
+
+
+
+ After/Before:
+ Defines the order of dependencies.
+
+
+
+
+ Conflicts:
+ Units that should not be active while this target is active.
+
+
+
+For more information on &systemd; targets, refer to man 5 systemd.target
+ and man 7 systemd.special.
+
+
diff --git a/concepts/technical-diff-basesytem.xml b/concepts/technical-diff-basesytem.xml
index 1a53f3968..ec2795f03 100644
--- a/concepts/technical-diff-basesytem.xml
+++ b/concepts/technical-diff-basesytem.xml
@@ -30,7 +30,7 @@
Are there any changes to the boot loader in &slea; 16?
- The boot loader in &slea; 16 is &grub; 2.12. BLS and &systemd;-boot are not used now, but may
+ The boot loader in &suselinux; 16 is &grub; 2.12. BLS and &systemd;-boot are not used now, but may
be added in the future.
@@ -45,12 +45,12 @@
to /usr. This solution enables you to maintain your settings in
/etc without worrying about maintenance updates overwriting them. Even though many packages follow the paradigm, certain tools are still
shipped with regular configuration files located in /etc. The process is
- still ongoing, and we expect that the other tools will be adapted in a future &slea; release.
+ still ongoing, and we expect that the other tools will be adapted in a future &suselinux; release.
You have three options for how to modify the configuration of a service or tool. For example, the
tool frobnicator usually has configuration files in
- /etc/frob.conf. Starting with &slea; 16.0, the default configuration file
+ /etc/frob.conf. Starting with &suselinux; 16.0, the default configuration file
of the tool is /usr/etc/frob.conf. Do not modify this file as it may be
overwritten by future maintenance updates. To apply your custom changes, you have the following options:
@@ -94,9 +94,9 @@ This solution can lead to unexpected results if you have tools that try to updat
- Deprecated technologies in the base system
+ Removed technologies in the base system
- The following technologies have been deprecated:
+ The following technologies have been removed:
@@ -151,7 +151,7 @@ NIS aka Yellow Pages
&amd64;/&intel64;&x86-64;-v2Optimized v3 versions of certain shared libraries are now available. These libraries, which follow
- the naming scheme libfoo-x86_64_v3, are installed automatically on systems
+ the naming scheme libfoo-x86-64-v3, are installed automatically on systems
that support the new instruction set.
@@ -164,8 +164,8 @@ NIS aka Yellow Pages
&ibm; &power;&power; 9 and higher
- &slea; 16.0 is supported on &power; 10 and &power; 11.
-Even though &slea; 16.0 continues to work on &power; 9 (except for KVM), this is not supported by &ibm;.
+ &suselinux; 16.0 is supported on &power; 10 and &power; 11.
+Even though &suselinux; 16.0 continues to work on &power; 9 (except for KVM), this is not supported by &ibm;.&ibm; &power; 32-bit
@@ -190,7 +190,7 @@ Even though &slea; 16.0 continues to work on &power; 9 (except for KVM
Do you still support legacy BIOS on &x86-64;?
- The default for fresh installations of &slea; 16.0 is UEFI.
+ The default for fresh installations of &suselinux; 16.0 is UEFI.
Support for legacy BIOS is deprecated and will be removed in a future version of the product. However, support for legacy BIOS is provided for backward compatibility in the following scenarios:
diff --git a/concepts/technical-diff-desktop.xml b/concepts/technical-diff-desktop.xml
index 43f99260b..84d7f344c 100644
--- a/concepts/technical-diff-desktop.xml
+++ b/concepts/technical-diff-desktop.xml
@@ -28,7 +28,7 @@
- The desktop product has been discontinued. &slea; 16 is available with a minimal desktop
+ The desktop product has been discontinued. &suselinux; 16 is available with a minimal desktop
environment based on GNOME 48, including essential applications such as the Firefox Web browser, file browser, viewers for PDFs and images, and so on.
diff --git a/concepts/technical-diff-installation.xml b/concepts/technical-diff-installation.xml
index a2f7a63ab..aa03aa44f 100644
--- a/concepts/technical-diff-installation.xml
+++ b/concepts/technical-diff-installation.xml
@@ -30,8 +30,10 @@
What happened to &yast;?
- For installation, &slea; 16 comes with installer images based on a new technology called
- &agama;. The single purpose of &agama; is to install a new system and not to serve as a
+ &yast; is the installation and configuration tool used on &sle; 15 that also can be used from the running
+ system. &suselinux; 16 comes with installer images based on a new technology
+ called
+ &agama;. The sole purpose of &agama; is to install a new system and not to serve as a
management tool like &yast; was.
@@ -62,16 +64,16 @@ remote installation: deployment using the PXE client
based on JSON/Jsonnet is also available.
- You can install &slea; 16 using repositories provided by a local &rmt; server inside the
- customer's network. There is no &rmt; version based on &slea; 16 yet. However, &rmt; 15 is
- fully supported as an installation source for &slea; 16.
+ You can install &suselinux; 16 using repositories provided by a local &rmt; server inside the
+ customer's network. There is no &rmt; version based on &suselinux; 16 yet. However, &rmt; 15 is
+ fully supported as an installation source for &suselinux; 16.
Where is the rescue system?
The &agama; installation images no longer come with a separate rescue system. In a future
- release of &slea; 16, we plan to provide a separate image for this purpose.
+ release of &suselinux; 16, we plan to provide a separate image for this purpose. To enter a rescue
shell (without starting &agama;), you can use the live media. Alternatively, you can
use the &agama; image to access the installed for diagnosis and repair in any of the following ways:
diff --git a/concepts/technical-diff-kernel.xml b/concepts/technical-diff-kernel.xml
index cdf164980..f58eefc62 100644
--- a/concepts/technical-diff-kernel.xml
+++ b/concepts/technical-diff-kernel.xml
@@ -45,14 +45,14 @@
- &slea; 16.0 is delivered with kernel version 6.12.
+ &suselinux; 16.0 is delivered with kernel version 6.12.
Support status of file systems
-The following table provides an overview of the file system status in &slea; 16:
+The following table provides an overview of the file system status in &suselinux; 16:
Supported architecture level sets
@@ -112,7 +112,7 @@ The following table provides an overview of the file system status in &slea;&nbs
How do I configure &kdump;?
On &slea; 15 you could configure &kdump; using &yast;. However, with &yast; not available on
- &slea; 16, the kdumptool tool is now used to configure &kdump;. You can
+ &suselinux; 16, the kdumptool tool is now used to configure &kdump;. You can
control settings using the following variables in /etc/sysconfig/kdump:
diff --git a/concepts/technical-diff-management.xml b/concepts/technical-diff-management.xml
index 7c73d5a59..121f87e54 100644
--- a/concepts/technical-diff-management.xml
+++ b/concepts/technical-diff-management.xml
@@ -36,8 +36,8 @@
xlink:href="https://documentation.suse.com/sles/16.0/html/SLES-cockpit/">&cockpit; guide.
-For management automation, &slea; 16 supports &ansible;. &smlm; is another option for managing
-&slea; 16 installations that use &salt; internally. However, &slea; 16 itself does not contain &salt; packages.
+For management automation, &suselinux; 16 supports &ansible;. &smlm; is another option for managing
+&suselinux; 16 installations that use &salt; internally. However, &suselinux; 16 itself does not contain &salt; packages.
Support of WBEM has been dropped (in &slea; 15 it was provided by the SBLIM packages).
@@ -46,16 +46,16 @@ For management automation, &slea; 16 supports &ansible;. &smlm; is another
What has changed in the update stack and package management?
- In &slea; 16, there are no modules anymore to distinguish between, for example, the base
+ In &suselinux; 16, there are no modules anymore to distinguish between, for example, the base
system, server applications or development packages. There are also no longer separate channels
for pool and update. These changes, together with
- improvements to zypper, should result in better performance when applying updates. &slea; minor
+ improvements to zypper, should result in better performance when applying updates. &suselinux; minor
releases (aka 16.1, 16.2 and so on) will continue to have separate repositories.
- &slea; 16 instances can register against (and receive updates from) an RMT server. Currently,
+ &suselinux; 16 instances can register against (and receive updates from) an RMT server. Currently,
the RMT service has to run on a &slea; 15 instance as we do not support the RMT service on
- &slea; 16 yet. RMT on &slea; 16 is expected to be delivered with &slea; 16.1.
+ &suselinux; 16 yet. RMT on &suselinux; 16 is expected to be delivered with &suselinux; 16.1.
diff --git a/concepts/technical-diff-migration.xml b/concepts/technical-diff-migration.xml
index 77fa3fcde..1ef3a7503 100644
--- a/concepts/technical-diff-migration.xml
+++ b/concepts/technical-diff-migration.xml
@@ -28,7 +28,7 @@
-For migration from &slea; 15 to &slea; 16, we use a new approach to migration based on a technology called &suse; Migration Services.
+For migration from &slea; 15 to &suselinux; 16, we use a new approach to migration based on a technology called &suse; Migration Services.
This technology uses a special medium that attempts the migration in several
@@ -43,7 +43,7 @@ For migration from &slea; 15 to &slea; 16, we use a new approach to mi
Migration of a running system is no longer provided.
- &slea; 16.1 is the target release for delivering full migration support. 16.0 provides limited
+ &suselinux; 16.1 is the target release for delivering full migration support. 16.0 provides limited
migration capabilities, supporting only migrations from &slea; 15 SP7. Furthermore, the
tools in 16.0 do not cover all aspects of migration.
diff --git a/concepts/technical-diff-security.xml b/concepts/technical-diff-security.xml
index 23c5dd71e..43456885d 100644
--- a/concepts/technical-diff-security.xml
+++ b/concepts/technical-diff-security.xml
@@ -31,7 +31,7 @@
What is the status of &selnx;?
In standard installations, &selnx; is enabled by default and set to enforcing
- mode. When configuring a &slsa; or &sles4sapa; applications system to run &sap;, &selnx; is changed to permissive mode transparently. Future &sap; notes may further clarify details on how to modify the &selnx; setup to guarantee optimal performance.
+ mode. When configuring a &slsa; or &sles4sapa; applications system to run &sap;, &selnx; is changed to permissive mode transparently.
For details on &selnx;, refer to the
Are you changing the rules for &rootuser; access?
- Starting with &slea; 16, &rootuser; access is modified to be more secure by default. For users
+ Starting with &suselinux; 16, &rootuser; access is modified to be more secure by default. For users
with physical access to the system (that is, text console, serial, graphical desktop), login as
&rootuser; is still allowed by default. There is no change in behavior compared to &slea; 15.
However, for remote access via SSH, &rootuser; login is disabled when trying to use
@@ -56,7 +56,7 @@
What about sudo?
In &slea; (15 and older), &sudo; is configured to prompt for the password of the target user (the account you want to switch to).
- In &slea; 16, we have strengthened the security of the default setup:
+ In &suselinux; 16, we have strengthened the security of the default setup:
@@ -86,7 +86,7 @@
- This new policy is implemented with the sudo-policy-wheel-auth-self package, which is installed by default on new &slea; 16.0 systems.
+ This new policy is implemented with the sudo-policy-wheel-auth-self package, which is installed by default on new &suselinux; 16.0 systems.
diff --git a/concepts/technical-diff-virtualization.xml b/concepts/technical-diff-virtualization.xml
index d5303ec83..c7399db4e 100644
--- a/concepts/technical-diff-virtualization.xml
+++ b/concepts/technical-diff-virtualization.xml
@@ -28,6 +28,6 @@
- &slea; 16 continues to support &kvm;. Support for &xen; has been dropped.
+ &suselinux; 16 continues to support &kvm;. Support for &xen; has been dropped.
diff --git a/concepts/understanding-agama-installation-profiles.xml b/concepts/understanding-agama-installation-profiles.xml
index 47d267578..2a4c59d45 100644
--- a/concepts/understanding-agama-installation-profiles.xml
+++ b/concepts/understanding-agama-installation-profiles.xml
@@ -29,21 +29,20 @@
configuration file that specifies how the system should be set up. This profile describes
various aspects of the installation, including partitioning, networking, software
selection, and other options. The concept of using a profile for automated installation is
- similar to Auto&yast;. &agama; focuses specifically on the installation process itself and
+ similar to &ay;. &agama; focuses specifically on the installation process itself and
delegates further system configuration to other tools. &agama; aims for a high level of
- backward compatibility with Auto&yast; profiles for automated installations.
+ backward compatibility with &ay; profiles for automated installations.
- Difference between &agama; and Auto&yast; profiles
+ Difference between &agama; and &ay; profiles
- &agama; and Auto&yast; profiles are largely compatible for all common use cases. However,
- &agama; profiles are not fully compatible with Auto&yast; profiles, and cannot be used as
+ &agama; and &ay; profiles are largely compatible for all common use cases. However,
+ &agama; profiles are not fully compatible with &ay; profiles, and cannot be used as
a drop-in replacement without checking the compatibility. There are certain aspects of
- the Auto&yast; profiles that are currently supported in &agama; profiles, or may be
+ the &ay; profiles that are currently supported in &agama; profiles, or may be
supported in the future. However, there are certain other aspects that are neither
currently supported in &agama; profiles, nor will be supported in the future. For more
- information, refer to the section
- .
+ information, refer to the section .
@@ -51,7 +50,7 @@
Introduction to the &agama; profile structure
- &agama; profile configuration is defined using a JSON document. It contains several sections
+ The &agama; profile configuration is defined using a JSON document. It contains several sections
that are necessary for describing the installation parameters for a customized system. At a
high level, the profile consists of the following sections:
@@ -92,7 +91,7 @@
- localization: Language, keyboard, and timezone settings.
+ localization: Language, keyboard, and time zone settings.
@@ -112,7 +111,7 @@
- bootloader: Bootloader config and kernel params.
+ bootloader: Boot loader config and kernel params.
@@ -137,7 +136,7 @@
- legacyAutoyastStorage: Support for legacy AutoYaST JSON-style storage.
+ legacyAutoyastStorage: Support for legacy &ay; JSON-style storage.
@@ -158,8 +157,8 @@
For more information on the JSON and Jsonnet profiles, refer to the resources mentioned in
- the section. The upstream
- resources usually contains most updated information and examples about the profiles.
+ . The upstream
+ resources usually contain the most up-to-date information and examples about the profiles.
@@ -167,10 +166,9 @@
A minimal &agama; JSON profile must at least include sections for product identification,
product registration, and credentials for the root user. &agama; uses the defaults for the
- rest of the profile. As a best practice, you should also configure the following as a best
- practice:
+ rest of the profile. As a best practice, you should also configure the following:
- A hostname
+ A host name
A non-root user
Minimal localization settings
@@ -211,7 +209,7 @@
- You can generated a hashed password by running the following command:
+ You can generate a hashed password by running the following command:
&prompt.sudo;openssl passwd -6
@@ -222,8 +220,8 @@
&prompt.sudo;ssh-keygen -t rsa -b 4096 -C "YOUR-EMAIL@EXAMPLE.COM"
- Based on your requirements, choose the key type and the key size. However, it's better
- to adopt a stronger security.
+ Based on your requirements, choose the key type and the key size. However, it is better
+ to adopt stronger security.
diff --git a/glues/agama-installation-more-info.xml b/glues/agama-installation-more-info.xml
index 7c3ee840a..86b81c71f 100644
--- a/glues/agama-installation-more-info.xml
+++ b/glues/agama-installation-more-info.xml
@@ -29,7 +29,7 @@
Documentation of the &agama; open-source project:
-
+
diff --git a/glues/ansible-sap-more-info.xml b/glues/ansible-sap-more-info.xml
new file mode 100644
index 000000000..df1bae10c
--- /dev/null
+++ b/glues/ansible-sap-more-info.xml
@@ -0,0 +1,48 @@
+
+
+
+
+ %entities;
+]>
+
+
+ For more information
+
+
+
+
+
+
+
+ Refer to the following resources:
+
+
+
+
+ Introduction to Ansible Core
+ .
+
+
+
+
+ Ansible Linux System Roles
+ .
+
+
+
+
+ Documentation of open-source Ansible playbooks for the deployment of various &sap; software solutions:
+
+
+
+
+
diff --git a/glues/more-info-automated-installation-agama.xml b/glues/more-info-automated-installation-agama.xml
index da39f8ec2..1129ed4dd 100644
--- a/glues/more-info-automated-installation-agama.xml
+++ b/glues/more-info-automated-installation-agama.xml
@@ -38,13 +38,13 @@
&agama; user
documentation: Organizes information by user perspective and covers a wide range of
- topics, including interactive installation, unattended installation, Auto&yast; support,
+ topics, including interactive installation, unattended installation, &ay; support,
boot options, URLs, command-line reference, and remote access.
- &agama; boot
+ &agama; boot
options: Explains the kernel boot parameters that can be used to influence the
&agama; installation process, including how to specify the URL for an unattended
installation profile using inst.auto. It also mentions other useful
@@ -54,7 +54,7 @@
- &agama;
+ &agama;
storage configuration: A deep dive into storage configuration of target
deployments using &agama;. Essential for users with complex storage devices and
partitions.
@@ -63,15 +63,15 @@
&agama;
- Auto&yast; compatibility reference: Essential for users migrating from
- Auto&yast; or planning to reuse Auto&yast; profiles. It details the support status of
- various Auto&yast; profile sections and elements within &agama;.
+ xlink:href="https://agama-project.github.io/docs/user/reference/autoyast">&agama;
+ &ay; compatibility reference: Essential for users migrating from
+ &ay; or planning to reuse &ay; profiles. It details the support status of
+ various &ay; profile sections and elements within &agama;.
- &agama; CLI
+ &agama; CLI
reference: A complete list of all the &agama; commands.
@@ -80,7 +80,7 @@
&agama; project on GitHub:
- Contains source code for the &agama; installer, which you can use to deep dive into the
+ Contains the source code for the &agama; installer, which you can use to deep dive into the
installer's internals.
@@ -95,7 +95,7 @@
&agama;
- profile example in Jsonnet.
+ profile example in Jsonnet
diff --git a/glues/ntp-more-info.xml b/glues/ntp-more-info.xml
index 93344f7ac..a0551f498 100644
--- a/glues/ntp-more-info.xml
+++ b/glues/ntp-more-info.xml
@@ -20,13 +20,16 @@
Securing your computer with &firewalld; is described in
- .
+ .
Commands for operating &systemd; services are listed in
- .
+ .
diff --git a/images/default-vm-options.png b/images/default-vm-options.png
new file mode 100644
index 000000000..8fb4a486a
Binary files /dev/null and b/images/default-vm-options.png differ
diff --git a/images/libvirt_vmm_boot.png b/images/libvirt_vmm_boot.png
new file mode 100644
index 000000000..f8c50848f
Binary files /dev/null and b/images/libvirt_vmm_boot.png differ
diff --git a/images/libvirt_vmm_conndetails.png b/images/libvirt_vmm_conndetails.png
new file mode 100644
index 000000000..30acab963
Binary files /dev/null and b/images/libvirt_vmm_conndetails.png differ
diff --git a/images/libvirt_vmm_controller.png b/images/libvirt_vmm_controller.png
new file mode 100644
index 000000000..6db568e26
Binary files /dev/null and b/images/libvirt_vmm_controller.png differ
diff --git a/images/libvirt_vmm_desc.png b/images/libvirt_vmm_desc.png
new file mode 100644
index 000000000..2d2a2e04f
Binary files /dev/null and b/images/libvirt_vmm_desc.png differ
diff --git a/images/libvirt_vmm_details.png b/images/libvirt_vmm_details.png
new file mode 100644
index 000000000..040b4d7b5
Binary files /dev/null and b/images/libvirt_vmm_details.png differ
diff --git a/images/libvirt_vmm_input.png b/images/libvirt_vmm_input.png
new file mode 100644
index 000000000..790ba0e78
Binary files /dev/null and b/images/libvirt_vmm_input.png differ
diff --git a/images/libvirt_vmm_memory.png b/images/libvirt_vmm_memory.png
new file mode 100644
index 000000000..8ad6ae48e
Binary files /dev/null and b/images/libvirt_vmm_memory.png differ
diff --git a/images/libvirt_vmm_network.png b/images/libvirt_vmm_network.png
new file mode 100644
index 000000000..f03fcf974
Binary files /dev/null and b/images/libvirt_vmm_network.png differ
diff --git a/images/libvirt_vmm_overview.png b/images/libvirt_vmm_overview.png
new file mode 100644
index 000000000..1ec81ab96
Binary files /dev/null and b/images/libvirt_vmm_overview.png differ
diff --git a/images/libvirt_vmm_performance.png b/images/libvirt_vmm_performance.png
new file mode 100644
index 000000000..547dcf41a
Binary files /dev/null and b/images/libvirt_vmm_performance.png differ
diff --git a/images/libvirt_vmm_polling_charts.png b/images/libvirt_vmm_polling_charts.png
new file mode 100644
index 000000000..c575cf3f3
Binary files /dev/null and b/images/libvirt_vmm_polling_charts.png differ
diff --git a/images/libvirt_vmm_processor.png b/images/libvirt_vmm_processor.png
new file mode 100644
index 000000000..6ab4e15ea
Binary files /dev/null and b/images/libvirt_vmm_processor.png differ
diff --git a/images/libvirt_vmm_storage1.png b/images/libvirt_vmm_storage1.png
new file mode 100644
index 000000000..30a90ff41
Binary files /dev/null and b/images/libvirt_vmm_storage1.png differ
diff --git a/images/libvirt_vmm_usb_redirector.png b/images/libvirt_vmm_usb_redirector.png
new file mode 100644
index 000000000..014e8bb45
Binary files /dev/null and b/images/libvirt_vmm_usb_redirector.png differ
diff --git a/images/libvirt_vmm_video.png b/images/libvirt_vmm_video.png
new file mode 100644
index 000000000..9b74b32cb
Binary files /dev/null and b/images/libvirt_vmm_video.png differ
diff --git a/images/libvirt_vmm_vnet_ipv4.png b/images/libvirt_vmm_vnet_ipv4.png
new file mode 100644
index 000000000..573e0a324
Binary files /dev/null and b/images/libvirt_vmm_vnet_ipv4.png differ
diff --git a/images/qemu_sles_vnc.png b/images/qemu_sles_vnc.png
new file mode 100644
index 000000000..d5ce2f93c
Binary files /dev/null and b/images/qemu_sles_vnc.png differ
diff --git a/images/qemu_vnc_pwd.png b/images/qemu_vnc_pwd.png
new file mode 100644
index 000000000..9eb7519e6
Binary files /dev/null and b/images/qemu_vnc_pwd.png differ
diff --git a/images/qemu_win_sles.png b/images/qemu_win_sles.png
new file mode 100644
index 000000000..2226db2c1
Binary files /dev/null and b/images/qemu_win_sles.png differ
diff --git a/images/virt_add_pcidevice.png b/images/virt_add_pcidevice.png
index 1cdabedc0..e7d724939 100644
Binary files a/images/virt_add_pcidevice.png and b/images/virt_add_pcidevice.png differ
diff --git a/images/virt_add_usbdevice.png b/images/virt_add_usbdevice.png
new file mode 100644
index 000000000..93275635e
Binary files /dev/null and b/images/virt_add_usbdevice.png differ
diff --git a/images/virt_virt-manager_storage.png b/images/virt_virt-manager_storage.png
new file mode 100644
index 000000000..72410eb5f
Binary files /dev/null and b/images/virt_virt-manager_storage.png differ
diff --git a/images/virt_virt-manager_storage_add.png b/images/virt_virt-manager_storage_add.png
new file mode 100644
index 000000000..f83c84afc
Binary files /dev/null and b/images/virt_virt-manager_storage_add.png differ
diff --git a/images/virt_vmm_snapshots_list.png b/images/virt_vmm_snapshots_list.png
new file mode 100644
index 000000000..9818f53e9
Binary files /dev/null and b/images/virt_vmm_snapshots_list.png differ
diff --git a/l10n/de-de/DC-HA-snapper-basics b/l10n/de-de/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/de-de/DC-HA-snapper-basics
+++ b/l10n/de-de/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/de-de/DC-SAP-snapper-basics b/l10n/de-de/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/de-de/DC-SAP-snapper-basics
+++ b/l10n/de-de/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/de-de/DC-SLES-snapper-basics b/l10n/de-de/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/de-de/DC-SLES-snapper-basics
+++ b/l10n/de-de/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/de-de/xml/HA-copy-file-rsync.xml b/l10n/de-de/xml/HA-copy-file-rsync.xml
index 36df03ea7..558e9c167 100755
--- a/l10n/de-de/xml/HA-copy-file-rsync.xml
+++ b/l10n/de-de/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Erstellen Sie die Hauptkonfigurationsdatei und tragen Sie die folgenden Zeilen /etc/rsyncd.conf ein:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log ist der Ort, an dem Rsync die Protokolle schreibt.
- /var/lock/rsync.lock ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält.
+ /var/run/rsync.pid ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält. Führt die globalen Werte aus den Dateien /etc/rsyncd.d/*.inc in der Hauptkonfigurationsdatei zusammen.
diff --git a/l10n/de-de/xml/SAP-copy-file-rsync.xml b/l10n/de-de/xml/SAP-copy-file-rsync.xml
index eeceb5b19..3c1d5ead1 100755
--- a/l10n/de-de/xml/SAP-copy-file-rsync.xml
+++ b/l10n/de-de/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Erstellen Sie die Hauptkonfigurationsdatei und tragen Sie die folgenden Zeilen /etc/rsyncd.conf ein:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log ist der Ort, an dem Rsync die Protokolle schreibt.
- /var/lock/rsync.lock ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält.
+ /var/run/rsync.pid ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält. Führt die globalen Werte aus den Dateien /etc/rsyncd.d/*.inc in der Hauptkonfigurationsdatei zusammen.
diff --git a/l10n/de-de/xml/SLES-copy-file-rsync.xml b/l10n/de-de/xml/SLES-copy-file-rsync.xml
index 3d8a3ad82..b34ee5e8a 100755
--- a/l10n/de-de/xml/SLES-copy-file-rsync.xml
+++ b/l10n/de-de/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Erstellen Sie die Hauptkonfigurationsdatei und tragen Sie die folgenden Zeilen /etc/rsyncd.conf ein:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log ist der Ort, an dem Rsync die Protokolle schreibt.
- /var/lock/rsync.lock ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält.
+ /var/run/rsync.pid ist die Datei, die die PID der laufenden Rsync-Daemon-Instanz enthält. Führt die globalen Werte aus den Dateien /etc/rsyncd.d/*.inc in der Hauptkonfigurationsdatei zusammen.
diff --git a/l10n/es-es/DC-HA-snapper-basics b/l10n/es-es/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/es-es/DC-HA-snapper-basics
+++ b/l10n/es-es/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/es-es/DC-SAP-snapper-basics b/l10n/es-es/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/es-es/DC-SAP-snapper-basics
+++ b/l10n/es-es/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/es-es/DC-SLES-snapper-basics b/l10n/es-es/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/es-es/DC-SLES-snapper-basics
+++ b/l10n/es-es/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/es-es/xml/HA-copy-file-rsync.xml b/l10n/es-es/xml/HA-copy-file-rsync.xml
index 79ab66a30..71813b301 100755
--- a/l10n/es-es/xml/HA-copy-file-rsync.xml
+++ b/l10n/es-es/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Cree el archivo de configuración /etc/rsyncd.conf principal y añada las líneas siguientes:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log es la ubicación donde rsync escribe los registros.
- /var/lock/rsync.lock es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución.
+ /var/run/rsync.pid es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución. Fusiona valores globales de los archivos /etc/rsyncd.d/*.inc en el archivo de configuración principal.
diff --git a/l10n/es-es/xml/SAP-copy-file-rsync.xml b/l10n/es-es/xml/SAP-copy-file-rsync.xml
index b4190aa75..6c0469ccd 100755
--- a/l10n/es-es/xml/SAP-copy-file-rsync.xml
+++ b/l10n/es-es/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Cree el archivo de configuración /etc/rsyncd.conf principal y añada las líneas siguientes:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log es la ubicación donde rsync escribe los registros.
- /var/lock/rsync.lock es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución.
+ /var/run/rsync.pid es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución. Fusiona valores globales de los archivos /etc/rsyncd.d/*.inc en el archivo de configuración principal.
diff --git a/l10n/es-es/xml/SLES-copy-file-rsync.xml b/l10n/es-es/xml/SLES-copy-file-rsync.xml
index b718cdf83..6d5739969 100755
--- a/l10n/es-es/xml/SLES-copy-file-rsync.xml
+++ b/l10n/es-es/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Cree el archivo de configuración /etc/rsyncd.conf principal y añada las líneas siguientes:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log es la ubicación donde rsync escribe los registros.
- /var/lock/rsync.lock es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución.
+ /var/run/rsync.pid es el archivo que contiene el ID de proceso de la instancia del daemon de rsync en ejecución. Fusiona valores globales de los archivos /etc/rsyncd.d/*.inc en el archivo de configuración principal.
diff --git a/l10n/fr-fr/DC-HA-snapper-basics b/l10n/fr-fr/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/fr-fr/DC-HA-snapper-basics
+++ b/l10n/fr-fr/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/fr-fr/DC-SAP-snapper-basics b/l10n/fr-fr/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/fr-fr/DC-SAP-snapper-basics
+++ b/l10n/fr-fr/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/fr-fr/DC-SLES-snapper-basics b/l10n/fr-fr/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/fr-fr/DC-SLES-snapper-basics
+++ b/l10n/fr-fr/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/fr-fr/xml/HA-copy-file-rsync.xml b/l10n/fr-fr/xml/HA-copy-file-rsync.xml
index f78c7940e..fc21212d8 100755
--- a/l10n/fr-fr/xml/HA-copy-file-rsync.xml
+++ b/l10n/fr-fr/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Créez le fichier de configuration principal /etc/rsyncd.conf et ajoutez-y les lignes suivantes :log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log est l'emplacement dans lequel rsync écrit les journaux.
- /var/lock/rsync.lock est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution.
+ /var/run/rsync.pid est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution. Fusionne les valeurs globales des fichiers /etc/rsyncd.d/*.inc dans le fichier de configuration principal.
diff --git a/l10n/fr-fr/xml/SAP-copy-file-rsync.xml b/l10n/fr-fr/xml/SAP-copy-file-rsync.xml
index de9e849ac..d4d4f1102 100755
--- a/l10n/fr-fr/xml/SAP-copy-file-rsync.xml
+++ b/l10n/fr-fr/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Créez le fichier de configuration principal /etc/rsyncd.conf et ajoutez-y les lignes suivantes :log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log est l'emplacement dans lequel rsync écrit les journaux.
- /var/lock/rsync.lock est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution.
+ /var/run/rsync.pid est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution. Fusionne les valeurs globales des fichiers /etc/rsyncd.d/*.inc dans le fichier de configuration principal.
diff --git a/l10n/fr-fr/xml/SLES-copy-file-rsync.xml b/l10n/fr-fr/xml/SLES-copy-file-rsync.xml
index 8d28d9cae..76cf939f6 100755
--- a/l10n/fr-fr/xml/SLES-copy-file-rsync.xml
+++ b/l10n/fr-fr/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Créez le fichier de configuration principal /etc/rsyncd.conf et ajoutez-y les lignes suivantes :log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log est l'emplacement dans lequel rsync écrit les journaux.
- /var/lock/rsync.lock est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution.
+ /var/run/rsync.pid est le fichier qui contient l'ID du processus de l'instance du daemon rsync en cours d'exécution. Fusionne les valeurs globales des fichiers /etc/rsyncd.d/*.inc dans le fichier de configuration principal.
diff --git a/l10n/ja-jp/DC-HA-snapper-basics b/l10n/ja-jp/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/ja-jp/DC-HA-snapper-basics
+++ b/l10n/ja-jp/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/ja-jp/DC-SAP-snapper-basics b/l10n/ja-jp/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/ja-jp/DC-SAP-snapper-basics
+++ b/l10n/ja-jp/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/ja-jp/DC-SLES-snapper-basics b/l10n/ja-jp/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/ja-jp/DC-SLES-snapper-basics
+++ b/l10n/ja-jp/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/ja-jp/xml/HA-copy-file-rsync.xml b/l10n/ja-jp/xml/HA-copy-file-rsync.xml
index f4717e048..2172b3029 100755
--- a/l10n/ja-jp/xml/HA-copy-file-rsync.xml
+++ b/l10n/ja-jp/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
メイン設定ファイル/etc/rsyncd.confを作成し、以下の行を追加します。log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.logはrsyncがログを書き込む場所です。
- /var/lock/rsync.lockは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。
+ /var/run/rsync.pidは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。 グローバル値を/etc/rsyncd.d/*.incファイルからメイン設定ファイルにマージします。
diff --git a/l10n/ja-jp/xml/SAP-copy-file-rsync.xml b/l10n/ja-jp/xml/SAP-copy-file-rsync.xml
index fdebd21e5..57f40dbd5 100755
--- a/l10n/ja-jp/xml/SAP-copy-file-rsync.xml
+++ b/l10n/ja-jp/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
メイン設定ファイル/etc/rsyncd.confを作成し、以下の行を追加します。log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.logはrsyncがログを書き込む場所です。
- /var/lock/rsync.lockは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。
+ /var/run/rsync.pidは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。 グローバル値を/etc/rsyncd.d/*.incファイルからメイン設定ファイルにマージします。
diff --git a/l10n/ja-jp/xml/SLES-copy-file-rsync.xml b/l10n/ja-jp/xml/SLES-copy-file-rsync.xml
index 1d8292c40..4e6887aa3 100755
--- a/l10n/ja-jp/xml/SLES-copy-file-rsync.xml
+++ b/l10n/ja-jp/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
メイン設定ファイル/etc/rsyncd.confを作成し、以下の行を追加します。log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.logはrsyncがログを書き込む場所です。
- /var/lock/rsync.lockは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。
+ /var/run/rsync.pidは、実行中のrsyncデーモンインスタンスのPIDを含むファイルです。 グローバル値を/etc/rsyncd.d/*.incファイルからメイン設定ファイルにマージします。
diff --git a/l10n/pt-br/DC-HA-snapper-basics b/l10n/pt-br/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/pt-br/DC-HA-snapper-basics
+++ b/l10n/pt-br/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/pt-br/DC-SAP-snapper-basics b/l10n/pt-br/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/pt-br/DC-SAP-snapper-basics
+++ b/l10n/pt-br/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/pt-br/DC-SLES-snapper-basics b/l10n/pt-br/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/pt-br/DC-SLES-snapper-basics
+++ b/l10n/pt-br/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/pt-br/xml/HA-copy-file-rsync.xml b/l10n/pt-br/xml/HA-copy-file-rsync.xml
index dc4981849..d698949b7 100755
--- a/l10n/pt-br/xml/HA-copy-file-rsync.xml
+++ b/l10n/pt-br/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Crie o arquivo de configuração principal /etc/rsyncd.conf e adicione as seguintes linhas:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log é o local onde o rsync grava os registros.
- /var/lock/rsync.lock é o arquivo que contém o ID do processo da instância do daemon rsync em execução.
+ /var/run/rsync.pid é o arquivo que contém o ID do processo da instância do daemon rsync em execução. Faz a fusão dos valores globais dos arquivos /etc/rsyncd.d/*.inc no arquivo de configuração principal.
diff --git a/l10n/pt-br/xml/SAP-copy-file-rsync.xml b/l10n/pt-br/xml/SAP-copy-file-rsync.xml
index 13ba36ec4..9455f569d 100755
--- a/l10n/pt-br/xml/SAP-copy-file-rsync.xml
+++ b/l10n/pt-br/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Crie o arquivo de configuração principal /etc/rsyncd.conf e adicione as seguintes linhas:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log é o local onde o rsync grava os registros.
- /var/lock/rsync.lock é o arquivo que contém o ID do processo da instância do daemon rsync em execução.
+ /var/run/rsync.pid é o arquivo que contém o ID do processo da instância do daemon rsync em execução. Faz a fusão dos valores globais dos arquivos /etc/rsyncd.d/*.inc no arquivo de configuração principal.
diff --git a/l10n/pt-br/xml/SLES-copy-file-rsync.xml b/l10n/pt-br/xml/SLES-copy-file-rsync.xml
index 537521e36..b21642582 100755
--- a/l10n/pt-br/xml/SLES-copy-file-rsync.xml
+++ b/l10n/pt-br/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
Crie o arquivo de configuração principal /etc/rsyncd.conf e adicione as seguintes linhas:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log é o local onde o rsync grava os registros.
- /var/lock/rsync.lock é o arquivo que contém o ID do processo da instância do daemon rsync em execução.
+ /var/run/rsync.pid é o arquivo que contém o ID do processo da instância do daemon rsync em execução. Faz a fusão dos valores globais dos arquivos /etc/rsyncd.d/*.inc no arquivo de configuração principal.
diff --git a/l10n/zh-cn/DC-HA-snapper-basics b/l10n/zh-cn/DC-HA-snapper-basics
index 24094ce32..14e846009 100644
--- a/l10n/zh-cn/DC-HA-snapper-basics
+++ b/l10n/zh-cn/DC-HA-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sleha"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/zh-cn/DC-SAP-snapper-basics b/l10n/zh-cn/DC-SAP-snapper-basics
index eb8d7221a..3c0607fd6 100644
--- a/l10n/zh-cn/DC-SAP-snapper-basics
+++ b/l10n/zh-cn/DC-SAP-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles4sap"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/zh-cn/DC-SLES-snapper-basics b/l10n/zh-cn/DC-SLES-snapper-basics
index 36e5e8081..8e9938d85 100644
--- a/l10n/zh-cn/DC-SLES-snapper-basics
+++ b/l10n/zh-cn/DC-SLES-snapper-basics
@@ -10,4 +10,4 @@ PROFOS="sles"
STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
DOCBOOK5_RNG_URI="urn:x-suse:rng:v2:geekodoc-flat"
-XSLTPARAM="--param generate.json-ld=1 "
+
diff --git a/l10n/zh-cn/xml/HA-copy-file-rsync.xml b/l10n/zh-cn/xml/HA-copy-file-rsync.xml
index a42f286aa..96dd661db 100755
--- a/l10n/zh-cn/xml/HA-copy-file-rsync.xml
+++ b/l10n/zh-cn/xml/HA-copy-file-rsync.xml
@@ -281,7 +281,7 @@
创建主配置文件 /etc/rsyncd.conf,并添加以下几行:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log 是 rsync 记录日志的位置。
- /var/lock/rsync.lock 是包含正在运行的 rsync 守护程序实例的 PID 的文件。
+ /var/run/rsync.pid 是包含正在运行的 rsync 守护程序实例的 PID 的文件。 将 /etc/rsyncd.d/*.inc 文件中的全局值合并到主配置文件中。
diff --git a/l10n/zh-cn/xml/SAP-copy-file-rsync.xml b/l10n/zh-cn/xml/SAP-copy-file-rsync.xml
index 2f22b11de..f78ae12b8 100755
--- a/l10n/zh-cn/xml/SAP-copy-file-rsync.xml
+++ b/l10n/zh-cn/xml/SAP-copy-file-rsync.xml
@@ -281,7 +281,7 @@
创建主配置文件 /etc/rsyncd.conf,并添加以下几行:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log 是 rsync 记录日志的位置。
- /var/lock/rsync.lock 是包含正在运行的 rsync 守护程序实例的 PID 的文件。
+ /var/run/rsync.pid 是包含正在运行的 rsync 守护程序实例的 PID 的文件。 将 /etc/rsyncd.d/*.inc 文件中的全局值合并到主配置文件中。
diff --git a/l10n/zh-cn/xml/SLES-copy-file-rsync.xml b/l10n/zh-cn/xml/SLES-copy-file-rsync.xml
index a48c9b07b..3b363d95b 100755
--- a/l10n/zh-cn/xml/SLES-copy-file-rsync.xml
+++ b/l10n/zh-cn/xml/SLES-copy-file-rsync.xml
@@ -281,7 +281,7 @@
创建主配置文件 /etc/rsyncd.conf,并添加以下几行:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -289,7 +289,7 @@ include /etc/rsyncd.d/var/log/rsync.log 是 rsync 记录日志的位置。
- /var/lock/rsync.lock 是包含正在运行的 rsync 守护程序实例的 PID 的文件。
+ /var/run/rsync.pid 是包含正在运行的 rsync 守护程序实例的 PID 的文件。 将 /etc/rsyncd.d/*.inc 文件中的全局值合并到主配置文件中。
diff --git a/references/agama-advanced-storage-configuration.xml b/references/agama-advanced-storage-configuration.xml
index 317f2ac74..1bdc1b1c5 100644
--- a/references/agama-advanced-storage-configuration.xml
+++ b/references/agama-advanced-storage-configuration.xml
@@ -27,865 +27,884 @@
Storage configuration in &agama; is one of the most powerful and flexible components of the
automated installation process. It allows you to declaratively define everything from
- simple partition layouts to sophisticated combinations of LUKS encryption, Logical Volume
- Management (LVM), software RAID, and Btrfs subvolumes — all before the system is booted for
- the first time.
+ simple partition layouts to sophisticated combinations—all before the system is
+ booted for the first time.
- This topic primarily describes the &agama; storage schema as described in
+ This section illustrates several common use cases with examples. For an exhaustive
+ reference of all elements, refer to the schema
.
- Each section in this topic focuses on a specific capability or concept: basic partitions,
- encrypted volumes, nested logical volumes, RAID configurations, Btrfs setups, and advanced
- directives like preservation flags or formatting instructions. Together, they enable
- reproducible, secure, and scalable disk layout management suitable.
-
-
-
- Although a minimal configuration might only need one disk and one mount point, a more
- detailed storage model enables production-ready deployment pipelines, disaster recovery
- consistency, and tight security controls from the first boot.
- Top-level schema elements
+ Basic structure of the storage schema
- At the highest level, the storage section in the &agama; profile is an
- array of disk configuration objects. Each object describes a physical or virtual block
- device, and how partitions or logical volumes should be created on it. These objects live
- under the top-level storage key in the profile schema.
+ A storage section contains several entries describing how to configure the
+ corresponding storage devices, and some extra entries such as boot to
+ set up some general aspects that influence the final layout.
+
+ Each volume group or software RAID can represent a new logical device to be created, or an
+ existing device from the system to be processed. Entries below drives
+ represent devices that can be used as regular disks, which includes removable and fixed
+ disks, SD cards, DASD or zFCP devices, iSCSI disks, and multipath devices. Those entries
+ always correspond to devices that can be found in the system, because &agama; cannot create
+ that kind of device.
+
+
+ Top-level storage section syntax
+
+"storage": {
+ "drives": [ ... ],
+ "volumeGroups": [ ... ],
+ "mdRaids": [ ... ],
+ "boot": { ... }
+}
+
+
- Minimal top-level storage
+ Compatibility with legacy &ay; storage
- The following example illustrates only the bare minimum needed to define a disk object. It
- does not reflect the full capabilities of the &agama; storage model such as partitioning,
- encryption, volume management, or reuse behavior. These topics are covered in later
- sections.
+ In some cases, storage can be replaced by the
+ legacyAutoyastStorage section. This section supports everything offered
+ by the partitioning section of the &ay; profile. However, &agama;
+ does not validate this special section—be careful to provide valid &ay;
+ options.
+
+ The following sections illustrate certain common use-cases with examples. Before execution,
+ it is recommended to validate the configurations using the tools provided by &agama;.
+
+
+
+ Describing the devices
+
+ The drives collection contains several optional fields, some of which are
+ mutually exclusive.
+
- Minimal top-level storage entry
+ Elements in the drive collection
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": []
- }
-]
+{
+ "alias": "...",
+ "search": { ... },
+ "encryption": { ... },
+ "filesystem": { ... },
+ "partitions": [ ... ],
+ "ptableType": "..."
+}
-
-
- Filesystems partitions
- Standard partitions are used to create filesystems directly on a disk. These are the most
- common storage entities and are defined under the partitions array inside
- a disk object. Each partition can specify properties like mount points, filesystems type,
- format behavior, and reuse preferences.
+ Usually, a device represented by a drive entry is divided into several
+ partitions. Each entry of partitions has the following structure with
+ several optional fields:
+
+ Structure of partitions
+
+{
+ "alias": "...",
+ "search": { ... },
+ "id": "...",
+ "size": { ... },
+ "encryption": { ... },
+ "filesystem": { ... },
+ "delete": ...,
+ "deleteIfNeeded": ...
+}
+
+
- The example below demonstrates a configuration that sets up two partitions: one EFI system
- partition and one root partition formatted with XFS.
+ Drives and partitions can be combined such that one disk is used to create partitions and
+ another is directly formatted.
- Partitioned disk with EFI and root filesystems
+ Combination of disks with partitions and direct formatting
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "type": "efi",
- "size": 256,
- "mountPoint": "/boot/efi",
- "preserve": false,
- "format": true
- },
- {
- "type": "partition",
- "mountPoint": "/",
- "fsType": "xfs",
- "size": 20480,
- "preserve": false,
- "format": true
- }
- ]
- }
-]
-
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "filesystem": { "path": "/" },
+ "size": { "min": "10 GiB" }
+ },
+ {
+ "filesystem": { "path": "swap" },
+ "size": "2 GiB"
+ }
+ ]
+ },
+ {
+ "filesystem": { "path": "/home" }
+ }
+ ]
+}
+
- This section contains the following elements:
-
-
-
- type
-
-
- Either efi or partition. Determines how the
- partition is treated.
-
-
-
-
- size
-
-
- Size of the partition in MiB. If omitted, the remaining space is used.
-
-
-
-
- mountPoint
-
-
- Mount point inside the target filesystems. Must be specified unless the partition is
- unmounted.
-
-
-
-
- fsType
-
-
- Filesystems type, such as xfs, ext4, or
- btrfs. Required unless format is false.
-
-
-
-
- preserve
-
-
- Boolean flag. When true, existing data on this partition is preserved. Defaults to
- false.
-
-
-
-
- format
-
-
- Boolean flag. When true, the partition will be freshly formatted. Defaults to true
- unless preserve is set.
-
-
-
-
-
-
- LUKS encryption
+ An entry from volumeGroups can have the following properties:
+
+
+ Structure of volumeGroups
+
+{
+ "alias": "...",
+ "name": "...",
+ "search": { ... },
+ "physicalVolumes": [ ... ],
+ "logicalVolumes": [ ... ],
+ "peSize": ... ,
+ "delete": ...
+}
+
+
- &agama; supports encrypting block devices using LUKS. Encrypted devices can be used as
- mountable filesystems or as physical volumes in LVM setups. Each encrypted block must define
- its own passphrase or refer to a key file.
+ Entries of logicalVolumes are relatively similar to the ones used to
+ describe partitions.
+
+ Structure of logicalVolumes
+
+{
+ "alias": "...",
+ "search": { ... },
+ "name": "...",
+ "size": { ... },
+ "encryption": { ... },
+ "filesystem": { ... },
+ "pool": ...,
+ "usedPool": "...",
+ "stripes": ...,
+ "stripeSize": ...,
+ "delete": ...,
+ "deleteIfNeeded": ...
+}
+
+
- The example below shows a basic LUKS-encrypted root partition, created inside a disk and
- mounted as the system root.
+ To understand how all the previously described elements fit together, consider the following
+ example in which the first disk of the system is partitioned and a volume group is created on
+ top of that partition after encryption, to allocate two file systems.
- Root filesystem encrypted with LUKS
+ A combination of disk partition, volume groups and file systems
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "type": "efi",
- "size": 256,
- "mountPoint": "/boot/efi",
- "format": true,
- "preserve": false
- },
- {
- "type": "crypt",
- "name": "cryptroot",
- "cipher": "aes-xts-plain64",
- "keySize": 512,
- "password": "MY-SECRET-PASSWORD",
- "volume": {
- "type": "partition",
- "mountPoint": "/",
- "fsType": "xfs",
- "size": 20480,
- "format": true,
- "preserve": false
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "alias": "pv",
+ "id": "lvm",
+ "size": { "min": "12 GiB" },
+ "encryption": {
+ "luks2": { "password": "my secret passphrase" }
+ }
}
- }
- ]
- }
-]
+ ]
+ }
+ ],
+ "volumeGroups": [
+ {
+ "name": "system",
+ "physicalVolumes": [ "pv" ],
+ "logicalVolumes": [
+ {
+ "size": { "min": "10 GiB" },
+ "filesystem": { "path": "/", "type": "btrfs" }
+ },
+ {
+ "size": "2 GiB",
+ "filesystem": { "path": "swap", "type": "swap" }
+ }
+ ]
+ }
+ ]
+}
- This section contains the following elements:
-
-
-
- type
-
-
- Must be set to crypt to declare a LUKS encrypted volume.
-
-
-
-
- name
-
-
- Name for the mapped LUKS device, used in /dev/mapper.
-
-
-
-
- cipher
-
-
- Encryption cipher. For example, aes-xts-plain64.
-
-
-
-
- keySize
-
-
- Key size in bits. Common values are 256 or 512.
-
-
-
-
- password
-
-
- The passphrase used to unlock the encrypted volume. Can be replaced with
- keyFile if using an external key.
-
-
-
-
- volume
-
-
- The block device definition that will reside within the LUKS container. Often a single
- partition, but can also be a volume group.
-
-
-
-
+ &agama; can also manage software-defined MD RAID arrays represented as entries at the
+ mdRaids collection.
+
+
+ Structure of mdRaids collection
+
+{
+ "alias": "...",
+ "name": "...",
+ "search": { ... },
+ "level": "...",
+ "chunkSize": ... ,
+ "devices": [ ... ],
+ "size": { ... },
+ "encryption": { ... },
+ "filesystem": { ... },
+ "partitions": [ ... ],
+ "ptableType": "...",
+ "delete": ...
+}
+
+
+
+ The devices property is used to specify the devices that act as members of
+ the RAID.
+
+
+ An example with mdRaids and devices
+
+"storage": {
+ "drives": [
+ {
+ "search": "/dev/sda",
+ "partitions": [
+ { "alias": "sda-40", "size": "40 GiB" }
+ ]
+ },
+ {
+ "search": "/dev/sdb",
+ "partitions": [
+ { "alias": "sdb-40", "size": "40 GiB" }
+ ]
+ }
+ ],
+ "mdRaids": [
+ {
+ "devices": [ "sda-40", "sdb-40" ],
+ "level": "raid0"
+ }
+ ]
+}
+
+
-
- Logical Volume Management (LVM)
+
+ Searching existing devices
+
+ When a section in the profile describes modification and deletion of devices, the description
+ must match with one or more devices from the system. If a description matches several
+ devices, the same operations are applied to all of them. This approach is useful in several
+ situations, such as applying the same partitioning schema to several disks or deleting all
+ partitions of a disk that match a given criteria.
+
- Logical Volume Management allows aggregating multiple block devices or partitions into a
- single logical storage pool. In &agama;, LVM setups are described using a
- volumeGroup type, under which logical volumes are listed.
+ Matching is performed using a search subsection, as illustrated below. By
+ default, all devices in the scope fitting the conditions are matched. You can limit the
+ number of devices that match using max. The following example shows how
+ you can use several search sections to find the three biggest disks in the system, delete all
+ partitions bigger than 1 GiB within them and create new partitions of type RAID.
- LVM setup with a single logical volume
+ Usage of the search section
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "type": "partition",
- "size": 30720,
- "volume": {
- "type": "volumeGroup",
- "name": "systemvg",
- "volumes": [
- {
- "name": "home",
- "mountPoint": "/home",
- "fsType": "xfs",
- "size": 20480,
- "format": true,
- "preserve": false
- }
- ]
+"storage": {
+ "drives": [
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 3
+ },
+ "partitions": [
+ {
+ "search": {
+ "condition": { "size": { "greater": "1 GiB" } }
+ },
+ "delete": true
+ },
+ {
+ "alias": "newRaidPart",
+ "id": "raid",
+ "size": { "min": "1 GiB" }
}
- }
- ]
- }
-]
-
+ ]
+ }
+ ]
+}
+
- This section contains the following elements:
-
-
-
- type
-
-
- Set to volumeGroup to define a new LVM volume group container.
-
-
-
-
- name
-
-
- Name of the volume group. Used for identifying the group in
- /dev/<vg-name>/.
-
-
-
-
- volumes
-
-
- List of logical volumes within this volume group. Each volume is defined using fields
- like name, mountPoint, fsType,
- and size.
-
-
-
-
-
-
- RAID configuration
+ The scope of each search depends on the place in the profile of the search
+ section. In the example above, only disks from the system are considered as possible
+ candidates. A search section inside the description of an MD RAID only matches the software
+ RAID devices. A search section inside the partitions subsection of that
+ RAID description only matches the partitions of RAIDs that have matched the conditions of the
+ most external search.
+
- &agama; allows defining software RAID arrays directly within the storage configuration using
- the mdraid type. You can specify the RAID level, involved devices, chunk
- size, and metadata version, among other options. These arrays can be used as mountable
- volumes or serve as physical volumes in LVM or encryption stacks.
+ A device can never match two different sections of the &agama; profile. When several sections
+ at the same level contain a search subsection, devices are matched in the order the sections
+ appear in the profile.
- Basic RAID 1 setup with /home mounted
+ Order of device matching
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true
- },
- {
- "type": "disk",
- "device": "/dev/sdb",
- "wipe": true
- },
- {
- "type": "mdraid",
- "level": "1",
- "devices": ["/dev/sda", "/dev/sdb"],
- "volume": {
- "type": "partition",
- "mountPoint": "/home",
- "fsType": "xfs",
- "size": 10240,
- "format": true,
- "preserve": false
+"storage": {
+ "drives": [
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 1
+ },
+ "alias": "biggest"
+ },
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 1
+ },
+ "alias": "secondBiggest"
}
- }
-]
-
+ ]
+}
+
- This section includes the following elements:
-
-
-
- type
-
-
- Must be set to mdraid to define a software RAID device.
-
-
-
-
- level
-
-
- The RAID level to use (e.g., 0, 1,
- 5, 6, or 10).
-
-
-
-
- devices
-
-
- List of block devices that participate in the array.
-
-
-
-
- volume
-
-
- The volume definition describing what should be created on top of the RAID array. It
- may be a regular filesystem partition, a LUKS container, or an LVM setup.
-
-
-
-
-
-
- Btrfs layout
-
- &agama; supports configuring Btrfs subvolumes and mount points, allowing granular control
- over snapshot-aware filesystems. You can define a top-level Btrfs partition or volume, then
- define subvolumes under it, each optionally with its own mount point.
+ An empty search matches all devices in the scope. For example, the configuration given below
+ deletes all the partitions of the chosen disk, but only if the disk contains partitions.
- Btrfs setup with multiple subvolumes
+ Empty search matching all devices in the scope
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "type": "btrfs",
- "mountPoint": "/",
- "format": true,
- "subvolumes": [
- {
- "name": "@home",
- "mountPoint": "/home"
- },
- {
- "name": "@log",
- "mountPoint": "/var/log"
- }
- ]
- }
- ]
- }
-]
-
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ { "search": {}, "delete": true }
+ ]
+ }
+ ]
+}
+
- This section includes the following elements:
-
-
-
- type
-
-
- Must be set to btrfs to define a Btrfs partition.
-
-
-
-
- mountPoint
-
-
- The mount point for the root of the Btrfs volume (e.g., /).
-
-
-
-
- format
-
-
- Whether to format the Btrfs partition. Must be set to true to create
- new filesystems.
-
-
-
-
- subvolumes
-
-
- List of Btrfs subvolumes. Each subvolume is an object with at least a
- name, and optionally a mountPoint.
-
-
-
-
- name: The name of the Btrfs subvolume (e.g.,
- @home).
-
-
-
-
- mountPoint: Mount point for this subvolume (e.g.,
- /home).
-
-
-
-
-
-
-
-
- Partition flags and modifiers
+ If there is not a single system device matching the scope and the conditions of a given
+ search, then ifNotFound is used. If the value is skip,
+ the device definition is ignored. If the value is error, the whole
+ process is aborted.
+
- Flags and attributes that influence partition behavior, naming, sizing, and boot
- compatibility.
+ Entries on drives are different from all other subsections describing devices because drives
+ can only be matched to existing devices. If search is omitted for a drive, it is considered
+ to contain the following configuration:
- Example with partition flags and metadata
+ Behavior of search if omitted for a drive
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "mount": "/boot",
- "size": 512,
- "filesystem": "ext4",
- "esp": true,
- "grow": false,
- "label": "BOOT",
- "id": "boot-partition"
- }
- ]
+{
+ "search": {
+ "sort": "name",
+ "max": 1,
+ "ifNotFound": "error"
}
-]
-
+}
+
- This section describes the following partition attributes:
-
-
-
- grow
-
-
- If set to true, this partition or volume will take up any leftover
- space after allocating other defined volumes.
-
-
-
-
- esp
-
-
- Marks the partition as an EFI System Partition. This is required for UEFI boot setups
- when using a separate /boot partition.
-
-
-
-
- label
-
-
- Human-readable label to assign to the partition or logical volume. For example,
- HOME or BOOT.
-
-
-
-
- id
-
-
- Unique identifier used to reference this partition in other sections or mount
- relationships. Optional, but useful for referencing volumes in complex setups.
-
-
-
-
-
-
- Reusing existing volumes
+ When the syntax of a search subsection becomes cumbersome, you can use simple strings.
+
- To preserve data or reuse partitions from a previous installation, &agama; supports marking
- individual storage entries with preserve: true. This avoids reformatting
- or wiping the specified device or volume.
+ You can use search to find a device by its name. For example:
- Reusing an existing /home partition
+ Searching using device name
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": false,
- "partitions": [
- {
- "mount": "/home",
- "preserve": true,
- "id": "home-partition"
- }
- ]
- }
-]
-
+{ "search": "/dev/sda" }
+
- This section describes the relevant flag:
-
-
-
- preserve
-
-
- If true, the existing content of the volume will not be deleted or
- reformatted. This is useful when retaining data directories like
- /home or reuse across installations. The target must already be
- formatted with a valid filesystem.
-
-
-
-
+ The string * allows matching all the devices from the current context, if
+ any. This is especially useful to match all partitions or logical volumes in a device,
+ irrespective of whether there is any. For example, the two following search sections are
+ equivalent:
+
+
+ Example of equivalent search
+
+{ "search": "*" }
+
+
+{ "search": { "ifNotFound": "skip" } }
+
+
-
- Real-world example configurations
+
+ Referencing other devices
- Here are several end-to-end examples illustrating common and practical storage layouts.
+ At certain times, it is necessary to reference other devices as part of the specification of
+ an LVM volume group or RAID. Those devices can be existing system devices, or devices that
+ will be created in response to another entry of the &agama; profile. For that purpose, you
+ can use alias.
- UEFI layout with root, home, and swap
+ Using alias in storage configuration
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "mount": "/boot/efi",
- "size": 256,
- "filesystem": "vfat",
- "esp": true
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ { "size": "50 GiB", "id": "lvm", "alias": "newPV" }
+ ]
+ }
+ ],
+ "volumeGroups": [
+ {
+ "name": "newVG",
+ "physicalVolumes": [ "newPV" ],
+ "logicalVolumes": [ { "name": "data", "size": "20 GiB" } ]
+ }
+ ]
+}
+
+
+
+ If a section matching several existing devices contains an alias, that alias is considered as
+ a reference to all the devices. Consider the following equivalent examples that assume there
+ are at least two disks in the system:
+
+
+ Equivalent examples of alias
+
+"storage": {
+ "drives": [
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 1
},
- {
- "mount": "/",
- "size": 20480,
- "filesystem": "btrfs",
- "label": "ROOT"
+ "alias": "biggest"
+ },
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 1
},
- {
- "mount": "/home",
- "size": 10240,
- "filesystem": "xfs",
- "label": "HOME"
+ "alias": "secondBiggest"
+ }
+ ],
+ "mdRaids": [
+ {
+ "devices": [ "biggest", "secondBiggest" ],
+ "level": "raid0"
+ }
+ ]
+}
+
+"storage": {
+ "drives": [
+ {
+ "search": {
+ "sort": { "size": "desc" },
+ "max": 2
},
- {
- "filesystem": "swap",
- "size": 4096
- }
- ]
- }
-]
-
+ "alias": "big"
+ }
+ ],
+ "mdRaids": [
+ {
+ "devices": [ "big" ],
+ "level": "raid0"
+ }
+ ]
+}
+
+
+
+ Specifying the size of a device
+
+ When configuring storage in the &agama; profile, you must specify the desired size for a new
+ device or the target size when resizing an existing one. The schema allows for flexible size
+ specification. The most common method is using a human-readable string that can be parsed
+ into a valid size. For example, 10 GiB. Alternatively, you can provide a
+ size as an array (a tuple) containing a minimum size and an optional maximum size. The
+ resulting size will be between these two thresholds. If the maximum is omitted, the device
+ will expand to consume all available contiguous space, respecting other specified size
+ constraints.
+
+
+ For configurations targeting existing partitions or Logical Volumes (LVs)—which must
+ include a search section—the special keyword current can be used as
+ a minimum or maximum size limit. The use of current and how it affects
+ resizing the corresponding devices is explained separately.
+
+
+ If the size property is completely omitted for an existing device (for example, combined with
+ search), &agama; acts as if both minimum and maximum limits were set to
+ current. This characteristic implies that the partition or logical volume
+ is not resized. If the size is omitted for a device that will be created but includes a
+ file system entry specifying a mount point, &agama; can determine the size limits by applying
+ the settings of the installation product. In &agama; terminology, the product is the
+ operating system being installed, and it specifies the default size ranges for its relevant
+ file systems, such as /, swap and
+ /home.
+
+
+
+ Partition needed for booting
+
+ You can use the boot entry to configure whether &agama; should calculate
+ and create extra partitions needed for booting. The behavior is the same when using an alias. If
+ the device is not specified, &agama; takes the location of the root file system as reference.
+
- LVM-backed root and home partitions
+ Partitions needed for booting
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "type": "lvm",
- "id": "pv-system"
- }
- ]
- },
- {
- "type": "lvm_vg",
- "id": "vg-system",
- "devices": ["pv-system"],
- "volumes": [
- {
- "mount": "/",
- "size": 10240,
- "filesystem": "btrfs",
- "label": "ROOT"
- },
- {
- "mount": "/home",
- "size": 20480,
- "filesystem": "xfs",
- "label": "HOME"
- }
- ]
+"storage": {
+ "drives": [
+ {
+ "search": "/dev/sda",
+ "alias": "bootDisk"
+ },
+ {
+ "search": "/dev/sdb",
+ "partitions": [
+ { "filesystem": { "path": "/" } }
+ ]
+ }
+ ],
+ "boot": {
+ "configure": true,
+ "device": "bootDisk"
}
-]
-
+}
+
+
+
+ Keeping an existing file system or encryption layer
+
+ The entries for both filesystem and encryption contain
+ a reuse flag with a default value of false. You can use
+ it in combination with search to specify that the device must not be
+ re-formatted or re-encrypted.
+
+
+
+ Deleting and shrinking existing devices
+
+ The storage configuration proposal must allow defining how to manage existing storage
+ components, including partitions, LVM logical volumes, MD RAIDs, and LVM volume groups. A
+ search mechanism is employed to match a partition or LVM logical volume definition with one
+ or more devices already present on the system. Once a match is made, you can specify the
+ required action.
+
+
+ The component can be marked to be deleted unconditionally, or deleted if needed to free space
+ for newly defined devices. It can also be shrunk to a necessary size or shrunk or extended to
+ a specific size or range. It is even possible to express combined actions, like attempting to
+ shrink a component first and only proceeding to delete it if shrinking doesn't free up enough
+ space.
+
+
+ Deletion is achieved with the corresponding delete flag for unconditional
+ deletion or the deleteIfNeeded flag for conditional deletion. If either of
+ these flags is active for a partition, it is illogical to specify any other usage for it,
+ such as declaring a file system. For example, you can configure the proposal to
+ unconditionally delete partition number 1 and then conditionally delete
+ other partitions as needed to secure the space for a new 30 GiB partition.
+
- Encrypted LUKS root filesystem
+ Deleting and shrinking existing devices
-[
- {
- "type": "disk",
- "device": "/dev/sda",
- "wipe": true,
- "partitions": [
- {
- "mount": "/boot",
- "size": 512,
- "filesystem": "ext4"
- },
- {
- "type": "luks",
- "id": "crypt-root"
- }
- ]
- },
- {
- "type": "luks_open",
- "id": "decrypted-root",
- "device": "crypt-root",
- "name": "cryptroot",
- "filesystem": "btrfs",
- "mount": "/",
- "label": "ROOT"
- }
-]
-
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "search": {
+ "condition": { "number": 1 }
+ },
+ "delete": true
+ },
+ { "search": {}, "deleteIfNeeded": true },
+ { "size": "30 GiB" }
+ ]
+ }
+ ]
+}
+
+
+ Often some partitions or logical volumes are shrunk only to make space for the declared
+ devices. But because resizing is not a destructive operation, you can declare a given
+ partition to be resized (shrunk or extended), then formatted and/or mounted.
+
+
+ Limitation of resizing
+
+ Resizing a partition can be limited depending on its content and the file system type.
+
+
+
+ Combining search and resize is enough to indicate that
+ &agama; is expected to resize a given partition, if possible. The keyword
+ current can be used as min and/or
+ max for the size range, and it is always equivalent to the exact original
+ size of the device. The simplest way to use current is to just specify
+ that the matched device should keep its original size. That is the default for searched (and
+ found) devices, if size is completely omitted.
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "search": {
+ "condition": { "number": 1 }
+ },
+ "size": { "min": "current", "max": "current" }
+ }
+ ]
+ }
+ ]
+}
+
+
+ You can use other combinations to specify how a device could be resized, if possible. See the
+ following examples with explanatory file system labels.
+
+
+
+ The condition fsLabel is not yet implemented.
+
+
+
+ Deleting and shrinking of existing devices including the fsLabel condition for file system labels
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "search": {
+ "condition": { "fsLabel": "shrinkIfNeeded" }
+ },
+ "size": { "min": 0, "max": "current" }
+ },
+ {
+ "search": {
+ "condition": { "fsLabel": "resizeToFixedSize" }
+ },
+ "size": "15 GiB"
+ },
+ {
+ "search": {
+ "condition": { "fsLabel": "resizeByRange" }
+ },
+ "size": { "min": "10 GiB", "max": "50 GiB" }
+ },
+ {
+ "search": {
+ "condition": { "fsLabel": "growAsMuchAsPossible" }
+ },
+ "size": { "min": "current" }
+ }
+ ]
+ }
+ ]
+}
+
+
+
+ When the size limits are specified as a combination of current and a fixed
+ value, ensure that the resulting min is not bigger than the resulting
+ max.
+
+
+ Both deleteIfNeeded and a size range can be combined to indicate that
+ &agama; should make space first, by shrinking the partitions and deleting them only if
+ shrinking is not enough.
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "search": {},
+ "size": { "min": 0, "max": "current" },
+ "deleteIfNeeded": true
+ }
+ ]
+ }
+ ]
+}
+
-
- Partition type reference
-
- The following types are valid values for the type field in the &agama;
- storage schema. Each type describes a different layer or behavior in the storage stack.
-
-
-
- disk
-
-
- Represents a physical or virtual block device. It contains a list of partitions or
- other volume definitions.
-
-
-
-
- partition
-
-
- Represents a filesystem partition on a disk. Most commonly used for mount points like
- /boot, /home, or /.
-
-
-
-
- luks
-
-
- Marks a partition to be encrypted using LUKS. Must be opened later using
- luks_open.
-
-
-
-
- luks_open
-
-
- Refers to an encrypted volume defined via luks. It allows specifying
- filesystem and mount point on the decrypted device.
-
-
-
-
- lvm
-
-
- Initializes a partition as a physical volume (PV) for LVM. Used in combination with
- lvm_vg.
-
-
-
-
- lvm_vg
-
-
- Defines a volume group that aggregates one or more PVs. Contains logical volumes with
- their own mount points and filesystems.
-
-
-
-
- mdraid
-
-
- Used to define software RAID arrays (e.g., RAID1, RAID5) over multiple disks or
- partitions.
-
-
-
-
- btrfs_subvolume
-
-
- Defines a Btrfs subvolume and mount point within a Btrfs-formatted volume. Requires a
- parent Btrfs filesystem to exist.
-
-
-
-
+
+ Generating default volumes
+
+ Every product provides a configuration which defines the storage volumes. For example,
+ feasible file systems for root and default partitions to create. The default or mandatory
+ product volumes can be automatically generated by using a generate section
+ in the partitions or logicalVolumes sections.
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ { "generate": "default" }
+ ]
+ }
+ ]
+}
+
+
+ The generate section allows creating the product volumes without
+ explicitly writing all of them. The configuration above would be equivalent to the following:
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ { "filesystem": { "path": "/" } },
+ { "filesystem": { "path": "/home" } },
+ { "filesystem": { "path": "swap" } }
+ ]
+ }
+ ]
+}
+
+
+ If any path is explicitly defined, the generate section will not generate
+ a volume for it. For example, with the following configuration, only root and swap would be
+ automatically added.
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ { "generate": "default" },
+ { "filesystem": { "path": "/home" } }
+ ]
+ }
+ ]
+}
+
+
+ The auto-generated volumes can also be configured. For example, for encrypting the
+ partitions:
+
+
+"storage": {
+ "drives": [
+ {
+ "partitions": [
+ {
+ "generate": {
+ "partitions": "default",
+ "encryption": {
+ "luks1": { "password": "12345" }
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
+
+
+ The mandatory keyword can be used for generating only the mandatory
+ partitions or logical volumes:
+
+
+"storage": {
+ "volumeGroups": [
+ {
+ "name": "system",
+ "logicalVolumes": [
+ { "generate": "mandatory" }
+ ]
+ }
+ ]
+}
+
-
- Common pitfalls and edge cases
-
- This section outlines frequently encountered mistakes and gotchas that can lead to
- installation failures or misconfigured systems when using &agama;'s storage schema.
-
-
-
-
- Missing or incorrect id fields: Every volume
- layer (disk, partition, luks, lvm, etc.) should have a unique and predictable
- id. Reusing IDs or leaving them out leads to ambiguous device paths
- during setup.
-
-
-
-
- Forgetting to mount the root volume: If no volume
- has a mount set to /, the system will not boot
- properly.
-
-
-
-
- Not marking a bootable ESP for EFI systems:
- UEFI-based installations must have an EFI System Partition (esp: true)
- mounted at /boot/efi.
-
-
-
-
- Overlapping device references: Using the same partition or device in
- more than one storage object (e.g., as both LUKS and plain partition) can result in
- failed setups.
-
-
-
-
- Incorrect vg names in LVM: Ensure that the
- vg names in LVM volumes match exactly with the defined
- id of their respective lvm_vg parent objects.
-
-
-
-
- Using preserve: true without
- id: &agama; requires the preserved volume to be clearly
- referenced. Omitting id or device fields for
- preserved objects can break the reuse logic.
-
-
-
-
- Inconsistent RAID configurations: All RAID members must define the
- same raid group id and match in level and layout.
- Mismatches can silently fail or create invalid arrays.
-
-
-
+
+ Generating physical volumes
+
+ You can configure volume groups to explicitly use a set of devices as physical volumes. The
+ aliases of the devices to use are added to the list of physical volumes:
+
+
+"storage": {
+ "drives": [
+ {
+ "search": "/dev/vda",
+ "partitions": [
+ { "alias": "pv2", "size": "100 GiB" },
+ { "alias": "pv1", "size": "20 GiB" }
+ ]
+ }
+ ],
+ "volumeGroups": [
+ {
+ "name": "system",
+ "physicalVolumes": ["pv1", "pv2"]
+ }
+ ]
+}
+
+
+ The physical volumes can be automatically generated too, by simply indicating the target
+ devices in which to create the partitions. For that, a generate section is
+ added to the list of physical volumes:
+
+
+"storage": {
+ "drives": [
+ {
+ "search": "/dev/vda",
+ "alias": "pvs-disk"
+ }
+ ],
+ "volumeGroups": [
+ {
+ "name": "system",
+ "physicalVolumes": [
+ { "generate": ["pvs-disk"] }
+ ]
+ }
+ ]
+}
+
+
+ If the auto-generated physical volumes have to be encrypted, then the encryption config is
+ added to the generate section:
+
+
+"storage": {
+ "drives": [
+ {
+ "search": "/dev/vda",
+ "alias": "pvs-disk"
+ }
+ ],
+ "volumeGroups": [
+ {
+ "name": "system",
+ "physicalVolumes": [
+ {
+ "generate": {
+ "targetDevices": ["pvs-disk"],
+ "encryption": {
+ "luks2": { "password": "12345" }
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
+
diff --git a/references/agama-installation-profile-details.xml b/references/agama-installation-profile-details.xml
index a7e6d5248..66fbd7fcf 100644
--- a/references/agama-installation-profile-details.xml
+++ b/references/agama-installation-profile-details.xml
@@ -75,7 +75,7 @@
registrationCode
- The registration code for the product obtained from &scc;, and used to activate
+ The registration code for the product obtained from the &scc; and used to activate
repositories and receive updates.
@@ -84,7 +84,7 @@
registrationEmail
- The email address associated with the registration account used during product
+ The e-mail address associated with the registration account used during product
activation.
@@ -93,7 +93,7 @@
registrationUrl
- The full URL of the registration server. If you are using &scc;, you can omit this
+ The full URL of the registration server. If you are using the &scc;, you can omit this
field. However, it is useful when registering from a custom server.
@@ -130,10 +130,10 @@
- Hostname configuration for an &agama; installation profile
+ Host name configuration for an &agama; installation profile
- The hostname section sets the system's static and transient hostname. The
- static hostname is persistent across reboots, while the transient hostname is used
+ The hostname section sets the system's static and transient host name. The
+ static host name is persistent across reboots, while the transient host name is used
temporarily at runtime and may be overridden by network services like DHCP.
@@ -153,9 +153,9 @@
static
- The persistent hostname written to /etc/hostname. This name
+ The persistent host name written to /etc/hostname. This name
remains consistent across system reboots and is used by default if no transient
- hostname is specified.
+ host name is specified.
@@ -163,7 +163,7 @@
transient
- A temporary hostname applied at runtime. This may be used during deployment or
+ A temporary host name applied at runtime. This may be used during deployment or
installation to reflect an ephemeral identity. For example, it can be set via DHCP or
by installation tooling like &agama;.
@@ -208,12 +208,12 @@
The root user's password. If hashedPassword is
- true, this must be a pre-generated hash (For example, using
+ true, this must be a pre-generated hash (for example, using
openssl passwd -6). Otherwise, plain text is accepted and will be
hashed during installation.
- You can generated a hashed password by running the following command:
+ You can generate a hashed password by running the following command:
&prompt.sudo;openssl passwd -6
@@ -233,8 +233,8 @@
&prompt.sudo;ssh-keygen -t rsa -b 4096 -C "YOUR-EMAIL@EXAMPLE.COM"
- Based on your requirements, choose the key type and the key size. However, it's better
- to adopt a stronger security.
+ Based on your requirements, choose the key type and the key size. However, it is better
+ to adopt stronger security.
@@ -295,7 +295,7 @@
password
- The user's password, in plain text or pre-hashed depending on the
+ The user's password, in plain text or pre-hashed, depending on the
hashedPassword flag. If plaintext is provided, it will be
automatically hashed.
@@ -337,7 +337,7 @@
language
- The system language and locale, specified as a locale string. For example,
+ The system language and locale specified as a locale string. For example,
en_US.UTF-8 or de_DE. This controls messages,
number formats, date formats, and default encoding.
@@ -357,7 +357,7 @@
timezone
- The system time zone, using a region/location format. For example,
+ The system time zone using a region/location format. For example,
Europe/Berlin. This sets the default system clock and affects
date/time display.
@@ -416,10 +416,12 @@
- Trust the GPG key for the Package Hub repository
-
- When enabling &ph; during a manual installation, users are prompted to trust the repository's GPG key. To trust the key automatically during an unattended installation, use the following snippet:
-
+ Trust the GPG key for the Package Hub repository
+
+ When enabling &ph; during a manual installation, users are prompted to trust the
+ repository's GPG key. To trust the key automatically during an unattended installation, use
+ the following snippet:
+
{
product: {
@@ -453,89 +455,54 @@
Storage configuration for an &agama; installation profile
The storage section defines the system's target disk layout, such as
- partitions, filesystems, and volume management, to be applied during installation. This field
- references the Agama storage schema, which is referenced from the profile schema.
+ devices, partitions and volume management, to be applied during installation. This field
+ references the &agama; storage schema, which is referenced from the profile schema.
- Advanced storage configuration
-
- An exhaustive description of all possible storage configuration using &agama; is beyond the
- scope of this section, as it will need a careful consideration of the
- storage model schema. For information on advanced storage configuration, refer to the section .
-
+ Advanced storage configuration
+
+ An exhaustive description of all possible storage configurations using &agama; is beyond the
+ scope of this section, as it will need careful consideration of the storage model schema.
+ For information on advanced storage configuration, refer to .
+
- Sample storage configuration for an &agama; installation profile
+ storage section syntax for an &agama; installation profile
"storage": {
- "disks": [
- {
- "device": "/dev/sda",
- "partitions": [
- {
- "mountPoint": "/",
- "fsType": "ext4",
- "size": "20G"
- },
- {
- "mountPoint": "swap",
- "fsType": "swap",
- "size": "4G"
- }
- ]
- }
- ]
+ "drives": [ ... ],
+ "volumeGroups": [ ... ],
+ "mdRaids": [ ... ],
+ "boot": { ... }
}
- This section contains the following fields:
+ A storage section contains several entries describing how to configure the
+ corresponding storage devices, and some extra entries such as boot to
+ set up some general aspects that influence the final layout.
-
-
- disks
-
-
- A list of disks on which partitions will be defined.
-
-
-
-
- device: The full device path, such as
- /dev/sda.
-
-
-
-
- partitions: A list of partitions to create on the disk.
-
-
-
-
- mountPoint: The mount point for the partition, or
- swap for swap areas.
-
-
-
-
- fsType: Filesystem type, such as ext4 or
- swap.
-
-
-
-
- size: Size of the partition (e.g., 20G).
-
-
-
-
-
-
-
-
+
+ Each volume group or software RAID can represent a new logical device to be created, or an
+ existing device from the system to be processed. Entries below drives
+ represent devices that can be used as regular disks, which includes removable and fixed
+ disks, SD cards, DASD or zFCP devices, iSCSI disks, and multipath devices. Those entries
+ always correspond to devices that can be found on the system, because &agama; cannot create
+ that kind of device.
+
+
+ Compatibility with legacy &ay; storage
+
+ In some cases, storage can be replaced by the
+ legacyAutoyastStorage section. This section supports everything offered
+ in the partitioning section of the &ay; profile. However, &agama;
+ does not validate this special section—be careful to provide valid &ay;
+ options.
+
+
- Bootloader configuration for an &agama; installation profile
+ Boot loader configuration for an &agama; installation profile
The bootloader section defines boot-time behavior, including whether to
pause at the boot menu and what extra kernel parameters to pass. It affects the installed
@@ -659,7 +626,7 @@
Support for IEEE 802.1X authentication is intended for advanced enterprise
- deployments where authentication is required at the link layer, before IP is
+ deployments where authentication is required at the link layer, before an IP is
assigned. This commonly involves integration with RADIUS and certificate-based trust.
Misconfiguration can result in complete network inaccessibility. Refer to
systemd-networkd documentation for authoritative guidance.
@@ -732,7 +699,7 @@
autoconnect
- Boolean. Whether the connection is brought up automatically.
+ Boolean. Specifies whether the connection is brought up automatically.
@@ -771,7 +738,7 @@
- stp: Boolean. Enables Spanning Tree Protocol.
+ stp: Boolean. Enables the Spanning Tree Protocol.
@@ -1118,9 +1085,9 @@
- Legacy AutoYaST storage configuration for an &agama; installation profile
+ Legacy &ay; storage configuration for an &agama; installation profile
- The legacyAutoyastStorage section allows reuse of Auto&yast;-style storage
+ The legacyAutoyastStorage section allows reuse of &ay;-style storage
definitions by expressing them in JSON. It accepts an array of opaque objects directly
representing the legacy partitioning structure, allowing migration or backward compatibility
for existing storage configurations.
@@ -1158,9 +1125,9 @@
legacyAutoyastStorage
- An array of JSON objects compatible with the XML structure used in Auto&yast;’s
+ An array of JSON objects compatible with the XML structure used in &ay;’s
partitioning section. This allows experienced administrators to
- reuse complex partitioning logic without switching to Agama-native storage syntax.
+ reuse complex partitioning logic without switching to &agama;-native storage syntax.
@@ -1214,7 +1181,7 @@
- address: IP address or hostname of the iSCSI target.
+ address: IP address or host name of the iSCSI target.
diff --git a/references/ansible-basic-usage.xml b/references/ansible-basic-usage.xml
index d25b18a01..de7373cc2 100644
--- a/references/ansible-basic-usage.xml
+++ b/references/ansible-basic-usage.xml
@@ -40,8 +40,8 @@
model, where you define the desired final state. Ansible then automatically calculates and executes the necessary steps to achieve that state.
This approach ensures the task can be run multiple times without causing unintended changes, as Ansible first checks the
current state of the host and only performs an action if it differs from the specified final state. Some examples include:
- &prompt.user; ansible webservers -m ansible.builtin.service -a "name=httpd state=started"
- &prompt.user; ansible all -m ansible.builtin.setup
+ &prompt.sudo;ansible webservers -m ansible.builtin.service -a "name=httpd state=started"
+ &prompt.sudo;ansible all -m ansible.builtin.setupAnsible command-line tools
@@ -254,7 +254,6 @@ encryption/decryption utility for Ansible data files
- name: Ensure essential packages are present
hosts: all
become: true
- gather_facts: true
tasks:
- name: Install vim and curl
@@ -271,21 +270,21 @@ encryption/decryption utility for Ansible data files
simple playbooks.
Run the playbook:
-&prompt.user; ansible-playbook -i hosts.ini install.yml
+&prompt.user; ansible-playbook -i test.ini install.ymlAnsible connects to each host in your inventory and executes the defined tasks using the system's package manager.Understanding roles
-Roles allow you to easily reuse and share your Ansible automation by using a known file structure to automatically load all related artifacts, including tasks, variables, files, and handlers.
+Roles allow you to easily reuse and share your Ansible Automation by using a known file structure to automatically load all related artifacts, including tasks, variables, files, and handlers.Using rolesYou can use the role in the following ways:
- At the play level with the roles: This is the default way of using roles in a play.
- At the task level with ansible.builtin.import_role: You can reuse roles dynamically anywhere in the tasks section of a play using ansible.builtin.import_role.
- At the task level with ansible.builtin.import_role: You can reuse roles dynamically anywhere in the tasks section of a play using ansible.builtin.import_role.
+ At the play level with the roles option: This is the default way of using roles in a play.
+ At the task level with ansible.builtin.include_role: You can reuse roles dynamically anywhere in the tasks section of a play using ansible.builtin.include_role.
+ At the task level with ansible.builtin.import_role: You can reuse roles statically anywhere in the tasks section of a play using ansible.builtin.import_role.As a dependency of another role.A play is the fundamental execution unit within an Ansible playbook.
@@ -297,17 +296,17 @@ encryption/decryption utility for Ansible data files
---
- hosts: webservers
tasks:
- - name: Print a message
+ - name: Print a message before executing role
ansible.builtin.debug:
- msg: "before we run our role"
+ msg: "Debug message before executing role"
- name: Apply standard user and group setup from 'base_users' role
ansible.builtin.import_role:
name: base_users
- - name: Print a message
+ - name: Print a message after executing role
ansible.builtin.debug:
- msg: "after we ran our role"
+ msg: "Debug message after executing role"
You can use other keywords, for example:
@@ -363,7 +362,7 @@ encryption/decryption utility for Ansible data files
Ansible gives precedence to variables defined more recently, more active and with more explicit scope. Variables inside the default
folder are easily overridden. Anything in the vars directory of the role overrides previous versions
-of that variable in the namespace.
+of that variable in the namespace. For more on variable precedence, refer to .
Setting variables
@@ -399,10 +398,10 @@ For example, if all systems in the manhattan group use The content of each variable file is a simple YAML dictionary. For example, /vars/test.yml :
- name: Show foo and bar
- ansible.builtin.debug:
- msg:
- foo: "{{ foo }}"
- bar: "{{ bar }}"
+ ansible.builtin.debug:
+ msg:
+ foo: "{{ foo }}"
+ bar: "{{ bar }}"
diff --git a/references/ansible-support-coverage-new.xml b/references/ansible-support-coverage-new.xml
new file mode 100644
index 000000000..308e91fa5
--- /dev/null
+++ b/references/ansible-support-coverage-new.xml
@@ -0,0 +1,49 @@
+
+
+
+
+ %entities;
+]>
+
+
+ Ansible support coverage
+
+
+
+The support matrix for Ansible and Python compatibility:
+
+
+
+
+ Control node:
+
+ Python 3.11 - 3.13
+ ansible-core 2.18
+ Ansible 11
+
+
+
+ Managed node:
+
+ Python 3.11 - 3.13
+
+
+
+
+
+ Python and Ansible versions in the control and managed nodes must be compatible.
+ Third-party software is not supported.
+ Only versions provided in packages that install into System Python are supported.
+ Python virtual environments or alternatives like pyenv are not supported.
+
+
+
+
diff --git a/references/autoyast-agama-compatibility-reference.xml b/references/autoyast-agama-compatibility-reference.xml
index 611509978..388357362 100644
--- a/references/autoyast-agama-compatibility-reference.xml
+++ b/references/autoyast-agama-compatibility-reference.xml
@@ -18,28 +18,28 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- Compatibility between Auto&yast; and &agama; profiles
+ Compatibility between &ay; and &agama; profiles
- Auto&yast; has long been the standard for unattended and automated installations in
+ &ay; has long been the standard for unattended and automated installations in
&productname; systems. With the advent of the &agama; installer, a new approach to system
- configuration and deployment has emerged—designed to be modular, declarative, and
+ configuration and deployment has emerged—designed to be modular, declarative and
extensible using modern formats and APIs.
- This section provides a detailed comparative view of the configuration models in Auto&yast;
+ This section provides a detailed comparative view of the configuration models in &ay;
and &agama;, highlighting conceptual differences and offering practical guidance for
- transitioning to the &agama; profile format. The goal is to equip experienced Auto&yast;
+ transitioning to the &agama; profile format. The goal is to equip experienced &ay;
users with a clear roadmap for migrating existing profiles to the new &agama; schema.
- Where applicable, compatibility matrices are provided to indicate which Auto&yast; modules
+ Where applicable, compatibility matrices are provided to indicate which &ay; modules
and fields are currently supported, planned, undecided, or explicitly unsupported in
&agama;. These mappings are based on the upstream reference maintained by the &agama;
project.
@@ -50,14 +50,14 @@
Conceptual differences
This table highlights the fundamental differences in design philosophy and approach between
- Auto&yast; and &agama;.
+ &ay; and &agama;.
- Auto&yast; vs &agama; design comparison
+ &ay; vs &agama; design comparison
- Auto&yast;
+ &ay;&agama;
@@ -87,29 +87,29 @@
- Mapping Auto&yast; sections to &agama; schema
+ Mapping &ay; sections to &agama; schema
This section provides a detailed comparison and translation map between the major sections
- and modules of Auto&yast; and their equivalents (or lack thereof) in the &agama; profile
+ and modules of &ay; and their equivalents (or lack thereof) in the &agama; profile
schema. Each subsection addresses a particular functional area, indicating how configuration
responsibilities are split or restructured in &agama;, and clearly states where support is
partial, planned, or unavailable.
- Granular support status for Auto&yast; elements
+ Granular support status for &ay; elements
- For a more granular information on the compatibility and support status for Auto&yast;
+ For more granular information on the compatibility and support status for &ay;
elements in &agama; profiles as compared to what is presented here, refer to the upstream
documentation
- .
+ .
System identity and localization
- This section covers the basic configuration for setting the system's hostname, language,
- keyboard layout, timezone, and the installed product identity. These are foundational
- parameters during the installation and are typically mapped one-to-one between Auto&yast;
+ This section covers the basic configuration for setting the system's host name, language,
+ keyboard layout, time zone, and the installed product identity. These are foundational
+ parameters during the installation and are typically mapped one-to-one between &ay;
and &agama;.
@@ -117,7 +117,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -128,7 +128,7 @@
hostnamehostname.static / hostname.transientFully supported
- &agama; distinguishes static and transient hostnames.
+ &agama; distinguishes static and transient host names.language
@@ -146,7 +146,7 @@
timezonelocalization.timezoneFully supported
- Timezone IDs follow the standard timezone database names (for example, Europe/Berlin).
+ Time zone IDs follow the standard time zone database names (for example, Europe/Berlin).product / base
@@ -171,7 +171,7 @@
- Auto&yast; element
+ &ay; elementSupport status&agama; fieldComment
@@ -245,7 +245,7 @@
Network configuration
- This section details how network settings are defined in Auto&yast; and &agama;, covering
+ This section details how network settings are defined in &ay; and &agama;, covering
interface setup, DHCP/static addressing, bonding, bridging, and other advanced networking
configurations.
@@ -254,7 +254,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -283,7 +283,7 @@
network/hostnamehostnameFully supported
- System hostname can be set independently of network block.
+ System host name can be set independently of network block.network/bridge
@@ -316,8 +316,8 @@
Storage and partitioning
- This section compares the storage configuration capabilities of Auto&yast; and &agama;. It
- covers traditional partitions, logical volumes, filesystems, encryption, RAID, and other
+ This section compares the storage configuration capabilities of &ay; and &agama;. It
+ covers traditional partitions, logical volumes, file systems, encryption, RAID, and other
storage-specific aspects of system setup.
@@ -325,7 +325,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -342,7 +342,7 @@
filesystemsstorage.devices[].partitions[].filesystemFully supported
- Common filesystems such as ext4, xfs, and btrfs are supported with mount and format options.
+ Common file systems such as ext4, XFS, and Btrfs are supported with mount and format options.lvm
@@ -354,13 +354,13 @@
raidstorage.devices[].partitions[].raidFully supported
- Software RAID levels (0, 1, 5, etc.) are supported including metadata and spare settings.
+ Software RAID levels (0, 1, 5, etc.) are supported, including metadata and spare settings.btrfsstorage.devices[].partitions[].btrfsFully supported
- Subvolumes, compression, and btrfs-specific mount options are available.
+ Subvolumes, compression, and Btrfs-specific mount options are available.encryption
@@ -393,7 +393,7 @@
Supports marking partitions as bootable, ESP, hidden, etc.
- complex criteria (e.g., by-id)
+ complex criteria (for example, by-id)storage.devices[].matchFully supportedDevices can be selected using labels, device paths, UUIDs, or custom match rules.
@@ -405,15 +405,15 @@
Software selection and patterns
- This section maps how software selection is handled in Auto&yast; and &agama;, including
+ This section maps how software selection is handled in &ay; and &agama;, including
individual package installation and pattern-based selections.
- Software and pattern mapping between Auto&yast; and &agama;
+ Software and pattern mapping between &ay; and &agama;
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -449,16 +449,16 @@
- Bootloader settings
+ Boot loader settings
- This section maps bootloader configuration options between Auto&yast; and &agama; profiles.
+ This section maps bootloader configuration options between &ay; and &agama; profiles.
- Bootloader configuration mapping
+ Boot loader configuration mapping
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -475,13 +475,13 @@
bootloader—kernel_parametersbootloader.extraKernelParamsFully supported
- Additional kernel command line parameters.
+ Additional kernel command-line parameters.bootloader—flag (for example, no_timeout)bootloader.stopOnBootMenuFully supported
- Controls whether the bootloader stops on the boot menu.
+ Controls whether the bootloader stops at the boot menu.bootloader—location
@@ -502,7 +502,7 @@
Security, certificates, and registration
- This section compares how Auto&yast; and &agama; handle security settings, certificate
+ This section compares how &ay; and &agama; handle security settings, certificate
deployment, and system registration during installation.
@@ -510,7 +510,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -554,14 +554,14 @@
Pre-install, post-install, and init scripts
- This section maps the script execution phases between Auto&yast; and &agama; profiles.
+ This section maps the script execution phases between &ay; and &agama; profiles.
- Script phases in Auto&yast; vs. &agama;
+ Script phases in &ay; vs. &agama;
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -599,7 +599,7 @@
File deployment and customization
- This section compares how custom files can be deployed during installation using Auto&yast;
+ This section compares how custom files can be deployed during installation using &ay;
and &agama; profiles.
@@ -607,7 +607,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -643,7 +643,7 @@
Miscellaneous hardware-specific sections
- This section covers specialized hardware-related configuration elements from Auto&yast; and
+ This section covers specialized hardware-related configuration elements from &ay; and
their equivalents (or lack thereof) in &agama;.
@@ -651,7 +651,7 @@
- Auto&yast; element
+ &ay; element&agama; fieldSupport statusComment
@@ -674,13 +674,13 @@
zipl—Not supported
- Bootloader configuration on s390x is partially handled by other fields like bootloader but no direct equivalent for zipl.
+ Boot loader configuration on s390x is partially handled by other fields like bootloader but no direct equivalent for zipl.kdump—Not supported
- &agama; does not currently support configuring kdump crash kernels.
+ &agama; does not currently support configuring &kdump; crash kernels.udev
@@ -694,19 +694,19 @@
- Unsupported Auto&yast; profile elements in &agama;
+ Unsupported &ay; profile elements in &agama;
- The following table lists Auto&yast; profile sections that are currently not supported by
+ The following table lists &ay; profile sections that are currently not supported by
&agama;. These modules either have no equivalent functionality in &agama;, are considered
legacy or niche, or are planned for future implementation. This list is essential for users
- migrating from Auto&yast; to avoid misconfiguration or unmet expectations.
+ migrating from &ay; to avoid misconfiguration or unmet expectations.
- Unsupported Auto&yast; modules in &agama;
+ Unsupported &ay; modules in &agama;
- Auto&yast; element
+ &ay; elementSupport statusComment
@@ -785,7 +785,7 @@
hostNot supported
- Deprecated; handled through hostname and networking.
+ Deprecated; handled through host name and networking.http-server
diff --git a/references/ha-installation-options.xml b/references/ha-installation-options.xml
index f02a3c6dd..d6744508b 100644
--- a/references/ha-installation-options.xml
+++ b/references/ha-installation-options.xml
@@ -32,7 +32,7 @@
-
+
Installing a Basic Two-Node &ha; Cluster
@@ -45,7 +45,7 @@
-
+
Installing a Basic Three-Node &ha; Cluster
diff --git a/references/indetifying_os_values.xml b/references/indetifying_os_values.xml
index 8e252f4a8..22a23c895 100644
--- a/references/indetifying_os_values.xml
+++ b/references/indetifying_os_values.xml
@@ -29,9 +29,9 @@
- Different content for &sle; 16.0 and &slm; 6.2
+ Different content for &suselinux; 16.0 and &slm; 6.2
- With &sle; 16.0 and &slm; 6.2 the content of the /etc/os-release is
+ With &suselinux; 16.0 and &slm; 6.2 the content of the /etc/os-release is
changed to the values described in the following sections. However, the valus are
compatible with previous releases.
@@ -43,7 +43,7 @@
Common attributes
- The common attributes have the same values for all &sle; products.
+ The common attributes have the same values for all &suselinux; products.
diff --git a/references/libvirt_configuration_gui.xml b/references/libvirt_configuration_gui.xml
new file mode 100644
index 000000000..f4a3ad7ff
--- /dev/null
+++ b/references/libvirt_configuration_gui.xml
@@ -0,0 +1,1135 @@
+
+
+ %entities;
+]>
+
+
+ Configuring virtual machines with &vmm;
+
+
+
+ &vmm;'s Details view offers in-depth information
+ about the &vmguest;'s complete configuration and hardware equipment.
+ Using this view, you can also change the guest configuration or add and
+ modify virtual hardware. To access this view, open the guest's console
+ in &vmm; and either choose View
+ Details from the menu, or click
+ Show virtual hardware details in the toolbar.
+
+
+
+
+ yes
+
+
+
+ 2024-06-27
+
+
+
+
+
+
+
+
+ Details view of a &vmguest;
+
+
+
+
+
+
+
+
+
+
+ The left panel of the window lists &vmguest; overview and already installed
+ hardware. After clicking an item on the list, you can access its detailed
+ settings in the details view. You can change the hardware parameters to
+ match your needs, then click Apply to confirm them.
+ Certain changes take effect immediately, while others need a reboot of the
+ machine—and virt-manager warns you about that
+ fact.
+
+
+ To remove installed hardware from a &vmguest;, select the appropriate list
+ entry in the left panel and then click Remove in the
+ bottom right of the window.
+
+
+ To add new hardware, click Add Hardware below the left
+ panel, then select the type of the hardware you want to add in the
+ Add New Virtual Hardware window. Modify its parameters
+ and confirm with Finish.
+
+
+ The following sections describe configuration options for the specific
+ hardware type being added. They do not focus on
+ modifying an existing piece of hardware, as the options are identical.
+
+
+ Machine setup
+
+
+ This section describes the setup of the virtualized processor and memory
+ hardware. These components are vital to a &vmguest;, therefore you cannot
+ remove them. It also shows how to view the overview and performance
+ information, and how to change boot parameters.
+
+
+
+ Overview
+
+ Overview shows basic details about &vmguest; and the
+ hypervisor.
+
+
+ Overview details
+
+
+
+
+
+
+
+
+
+
+ Name, Title, and
+ Description are editable and help you identify
+ &vmguest; in the Virtual Machine Manager list of
+ machines.
+
+
+ &vmguest; title and description
+
+
+
+
+
+
+
+
+
+
+ UUID shows the universally unique identifier of the
+ virtual machine, while Status shows its current
+ status—Running, Paused, or
+ Shutoff.
+
+
+ The Hypervisor Details section shows the hypervisor
+ type, CPU architecture, used emulator, and chipset type. None of the
+ hypervisor parameters can be changed.
+
+
+
+
+ Performance
+
+ Performance shows regularly updated charts of CPU
+ and memory usage, and disk and network I/O.
+
+
+ Performance
+
+
+
+
+
+
+
+
+
+
+ Enabling disabled charts
+
+ Not all the charts in the Graph view are enabled
+ by default. To enable these charts, go to
+ FileView
+ Manager, then select
+ EditPreferences
+ Polling, and check the charts that
+ you want to see regularly updated.
+
+
+
+ Statistics charts
+
+
+
+
+
+
+
+
+
+
+
+
+ Processor
+
+ CPU includes detailed information about &vmguest;
+ processor configuration.
+
+
+ Processor view
+
+
+
+
+
+
+
+
+
+
+ In the CPUs section, you can configure the number of
+ virtual CPUs allocated to the &vmguest;. Logical host
+ CPUs shows the number of online and usable CPUs on the
+ &vmhost;.
+
+
+ The Configuration section lets you configure the CPU
+ model and topology.
+
+
+ When activated, the Copy host CPU configuration
+ option uses the host CPU model for &vmguest;. You can see the details
+ of the host CPU model in the output of the virsh
+ capabilities command. When deactivated, the CPU model needs
+ to be specified from the models available in the drop-down box.
+
+
+ The host CPU model provides a good trade-off between CPU
+ features and the ability to migrate the &vmguest;. &libvirt; does not model
+ every aspect of each CPU, so the &vmguest; CPU does not match the
+ &vmhost; CPU exactly. But the ABI provided to the &vmguest; is
+ reproducible and during migration the complete CPU model definition is
+ transferred to the destination &vmhost;, ensuring the migrated
+ &vmguest; can see the exact same CPU model on the destination.
+
+
+ The host-passthrough model provides the &vmguest;
+ with a CPU that is exactly the same as the &vmhost; CPU. This can be
+ useful when the &vmguest; workload requires CPU features not available
+ in &libvirt;'s simplified host-model CPU. The
+ host-passthrough model comes with the disadvantage
+ of reduced migration capability. A &vmguest; with
+ host-passthrough model CPU can only be migrated to a
+ &vmhost; with identical hardware.
+
+
+ For more information on &libvirt;'s CPU model and topology options, see
+ the CPU model and topology documentation at
+ .
+
+
+ After you activate Manually set CPU topology, you
+ can specify a custom number of sockets, cores and threads for the CPU.
+
+
+
+
+ Memory
+
+ Memory contains information about the memory that is
+ available to &vmguest;.
+
+
+ Memory view
+
+
+
+
+
+
+
+
+
+
+
+ Total host memory
+
+
+ Total amount of memory installed on &vmhost;.
+
+
+
+
+ Current allocation
+
+
+ The amount of memory currently available to &vmguest;. You can
+ hotplug more memory by increasing this value up to the value of
+ Maximum allocation.
+
+
+
+
+ Enable shared memory
+
+
+ Specify if the virtual machine can use shared memory via the
+ memfd backend. It is a requirement for using
+ the virtiofs file system. Find more details
+ in
+ .
+
+
+
+
+ Maximum allocation
+
+
+ The maximum value to which you can hotplug the currently
+ available memory. Any change to this value takes effect after the
+ next &vmguest; reboot.
+
+
+
+
+ Enable launch security
+
+
+ If the &vmhost; supports AMD-SEV technology, activating this
+ option enables a secured guest with encrypted memory. This
+ option requires a virtual machine with chipset type Q35.
+ For more details, refer to .
+
+
+
+
+
+ Large memory &vmguest;s
+
+ &vmguest;s with memory requirements of 4 TB or more must either
+ use the host-passthrough CPU mode, or explicitly
+ specify the virtual CPU address size when using
+ host-model or custom CPU modes.
+ The default virtual CPU address size for these modes may not be
+ sufficient for memory configurations of 4 TB or more. The address size
+ can only be specified by editing the &vmguest;s XML configuration.
+ See for more
+ information on specifying virtual CPU address size.
+
+
+
+
+
+ Boot options
+
+ Boot Options introduces options affecting the
+ &vmguest; boot process.
+
+
+ Boot options
+
+
+
+
+
+
+
+
+
+
+ In the Autostart section, you can specify whether
+ the virtual machine should automatically start during the &vmhost; boot
+ phase.
+
+
+ In the Boot device order, activate the devices used
+ for booting &vmguest;. You can change their order with the up and down
+ arrow buttons on the right side of the list. To choose from a list of
+ bootable devices on &vmguest; start, activate Enable boot
+ menu.
+
+
+ To boot a different kernel than the one on the boot device, activate
+ Enable direct kernel boot and specify the paths to
+ the alternative kernel and initrd placed on the &vmhost; file system.
+ You can also specify kernel arguments that are passed to the loaded
+ kernel.
+
+
+
+
+ Storage
+
+
+ This section gives you a detailed description of configuration options
+ for storage devices. It includes both hard disks and removable media,
+ such as USB or CD-ROM drives.
+
+
+
+ Adding a new storage device
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ Storage.
+
+
+ Add a new storage
+
+
+
+
+
+
+
+
+
+
+
+
+ To create a qcow2 disk image in the default
+ location, activate Create a disk image for the virtual
+ machine and specify its size in gigabytes.
+
+
+ To gain more control over the disk image creation, activate
+ Select or create custom storage and click
+ Manage to manage storage pools and images. The
+ window Choose Storage Volume opens, which has
+ almost identical functionality as the Storage tab
+ described in .
+
+
+ Supported storage formats
+
+ &suse; only supports the following storage formats:
+ raw and qcow2.
+
+
+
+
+
+ After you create and specify the disk image file, specify
+ the Device type. It can be one of the following
+ options:
+
+
+
+
+ Disk device
+
+
+
+
+ CDROM device: does not allow using
+ Create a disk image for the virtual machine.
+
+
+
+
+ Floppy device: does not allow using
+ Create a disk image for the virtual machine.
+
+
+
+
+ LUN Passthrough: required to use an existing
+ SCSI storage directly without adding it into a storage pool.
+
+
+
+
+
+
+ Select the Bus type for your device. The list of
+ available options depends on the device type you selected in the
+ previous step. The types based on VirtIO use
+ paravirtualized drivers.
+
+
+
+
+ In the Advanced options section, select the
+ preferred Cache mode.
+
+
+
+
+
+ Confirm your settings with Finish. A new storage
+ device appears in the left panel.
+
+
+
+
+
+ Controllers
+
+
+ This section focuses on adding and configuring new controllers.
+
+
+
+ Adding a new controller
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ Controller.
+
+
+ Add a new controller
+
+
+
+
+
+
+
+
+
+
+
+
+ Select the type of the controller. You can choose from
+ IDE, Floppy,
+ SCSI, SATA, VirtIO
+ Serial (paravirtualized), USB, or
+ CCID (smart card devices).
+
+
+
+
+ Optionally, for a USB or SCSI controller, select a controller model.
+
+
+
+
+ Confirm your settings with Finish. A new
+ controller appears in the left panel.
+
+
+
+
+
+ Networking
+
+
+ This section describes how to add and configure new network devices.
+
+
+
+ Adding a new network device
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ Network.
+
+
+ Add a new network interface
+
+
+
+
+
+
+
+
+
+
+
+
+ From the Network source list, select the source
+ for the network connection. The list includes &vmhost;'s available
+ physical network interfaces, network bridges, or network bonds. You
+ can also assign the &vmguest; to an already defined virtual network.
+ See for more information on
+ setting up virtual networks with &vmm;.
+
+
+
+
+ Specify a MAC address for the network device.
+ While &vmm; pre-fills a random value for your convenience, it is
+ recommended to supply a MAC address appropriate for your network
+ environment to avoid network conflicts.
+
+
+
+
+ Select a device model from the list. You can either leave the
+ Hypervisor default, or specify one of
+ e1000, rtl8139, or
+ virtio models. virtio uses
+ paravirtualized drivers.
+
+
+
+
+ Confirm your settings with Finish. A new network
+ device appears in the left panel.
+
+
+
+
+
+ Input devices
+
+
+ This section focuses on adding and configuring new input devices, such as
+ a mouse, a keyboard or a tablet.
+
+
+
+ Adding a new input device
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ Input.
+
+
+ Add a new input device
+
+
+
+
+
+
+
+
+
+
+
+
+ Select a device type from the list.
+
+
+
+
+ Confirm your settings with Finish. A new input
+ device appears in the left panel.
+
+
+
+
+
+ Enabling seamless and synchronized mouse pointer movement
+
+ When you click within a &vmguest;'s console with the mouse, the pointer
+ is captured by the console window and cannot be used outside the
+ console unless it is explicitly released (by pressing
+ ). To
+ prevent the console from grabbing the key and to enable seamless
+ pointer movement between host and guest instead, follow the
+ instructions in to add an
+ EvTouch USB Graphics Tablet to the &vmguest;.
+
+
+ Adding a tablet has the additional advantage of synchronizing the mouse
+ pointer movement between &vmhost; and &vmguest; when using a graphical
+ environment on the guest. With no tablet configured on the guest, you
+ may often see two pointers with one dragging behind the other.
+
+
+
+
+ Video
+
+
+ This section describes how to add and configure new video devices.
+
+
+
+ Adding a video device
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ Video.
+
+
+
+
+ Add a new video device
+
+
+
+
+
+
+
+
+
+
+
+
+ Select a model from the drop-down box.
+
+
+ Secondary video devices
+
+ Only QXL and Virtio can be
+ added as secondary video devices.
+
+
+
+
+
+ Confirm your settings with Finish. A new video
+ device appears in the left panel.
+
+
+
+
+
+ USB redirectors
+
+
+ USB devices that are connected to the client machine can be redirected to
+ the &vmguest; by using USB Redirectors.
+
+
+
+ Adding a USB redirector
+
+
+ Below the left panel, click Add Hardware to open
+ the Add New Virtual Hardware window. There, select
+ USB Redirection.
+
+
+ Add a new USB redirector
+
+
+
+
+
+
+
+
+
+
+
+
+ Select a device type from the list. Depending on your configuration,
+ you can either select a Spice channel or a
+ TCP redirector.
+
+
+
+
+ Confirm your settings with Finish. A new USB
+ redirector appears in the left panel.
+
+
+
+
+
+ Miscellaneous
+
+
+
+ Smartcard
+
+
+ Smartcard functionality can be added via the
+ Smartcard element. A physical USB smartcard
+ reader can then be passed through to the &vmguest;.
+
+
+
+
+ Watchdog
+
+
+ Virtual watchdog devices are also supported. They can be created
+ via the Watchdog element. The model and
+ the action of the device can be specified.
+
+
+ Requirements for virtual watchdog devices
+
+ Virtual watchdog devices require a specific driver and daemon
+ to be installed in the &vmguest;. Otherwise, the virtual watchdog
+ device does not work.
+
+
+
+
+
+ TPM
+
+
+ You can use the Host TPM device in the &vmguest; by adding TPM
+ functionality via the TPM element.
+
+
+ Virtual TPMs
+
+ The Host TPM can only be used in one &vmguest; at a time.
+
+
+
+
+
+
+
+ Adding a CD/DVD-ROM device with &vmm;
+
+
+ &kvm; supports CD or DVD-ROMs in &vmguest; either by directly accessing a
+ physical drive on the &vmhost; or by accessing ISO images. To create an
+ ISO image from an existing CD or DVD, use dd:
+
+
+&prompt.sudo;dd if=/dev/CD_DVD_DEVICE of=my_distro.iso bs=2048
+
+
+ To add a CD/DVD-ROM device to your &vmguest;, proceed as follows:
+
+
+
+
+
+ Double-click a &vmguest; entry in the &vmm; to open its console and
+ switch to the Details view with
+ ViewDetails.
+
+
+
+
+ Click Add Hardware and choose
+ Storage in the pop-up window.
+
+
+
+
+ Change the Device Type to IDE
+ CDROM.
+
+
+
+
+ Select Select or create custom storage.
+
+
+
+
+ To assign the device to a physical medium, enter the path to the
+ &vmhost;'s CD/DVD-ROM device (for example,
+ /dev/cdrom) next to
+ Manage. Alternatively, use
+ Manage to open a file browser and then click
+ Browse Local to select the device. Assigning
+ the device to a physical medium is only possible when the &vmm;
+ was started on the &vmhost;.
+
+
+
+
+ To assign the device to an existing image, click
+ Manage to choose an image from a storage pool.
+ If the &vmm; was started on the &vmhost;, alternatively choose an
+ image from another location on the file system by clicking
+ Browse Local. Select an image and close the
+ file browser with Choose Volume.
+
+
+
+
+
+
+ Save the new virtualized device with Finish.
+
+
+
+
+ Reboot the &vmguest; to make the new device available. For more
+ information, see
+ .
+
+
+
+
+
+ Adding a floppy device with &vmm;
+
+
+ Currently, &kvm; only supports the use of floppy disk images—using a
+ physical floppy drive is not supported. Create a floppy disk image from
+ an existing floppy using dd:
+
+
+&prompt.sudo;dd if=/dev/fd0 of=/var/lib/libvirt/images/floppy.img
+
+
+ To create an empty floppy disk image, use one of the following commands:
+
+
+
+
+ Raw image
+
+&prompt.sudo;dd if=/dev/zero of=/var/lib/libvirt/images/floppy.img bs=512 count=2880
+
+
+
+ FAT formatted image
+
+&prompt.sudo;mkfs.msdos -C /var/lib/libvirt/images/floppy.img 1440
+
+
+
+
+
+ To add a floppy device to your &vmguest;, proceed as follows:
+
+
+
+
+
+ Double-click a &vmguest; entry in the &vmm; to open its console and
+ switch to the Details view with
+ ViewDetails.
+
+
+
+
+ Click Add Hardware and choose
+ Storage in the pop-up window.
+
+
+
+
+ Change the Device Type to Floppy
+ Disk.
+
+
+
+
+ Choose Select or create custom storage and click
+ Manage to choose an existing image from a storage
+ pool. If &vmm; was started on the &vmhost;, alternatively choose an
+ image from another location on the file system by clicking
+ Browse Local. Select an image and close the file
+ browser with Choose Volume.
+
+
+
+
+ Save the new virtualized device with Finish.
+
+
+
+
+ Reboot the &vmguest; to make the new device available. For more
+ information, see
+ .
+
+
+
+
+
+ Ejecting and changing floppy or CD/DVD-ROM media with &vmm;
+
+
+ Whether you are using the &vmhost;'s physical CD/DVD-ROM device or an
+ ISO/floppy image: before you can change the media or image of an existing
+ device in the &vmguest;, you first need to disconnect
+ the media from the guest.
+
+
+
+
+
+ Double-click a &vmguest; entry in the &vmm; to open its console and
+ switch to the Details view with
+ ViewDetails.
+
+
+
+
+ Choose the Floppy or CD/DVD-ROM device and eject the
+ medium by clicking Disconnect.
+
+
+
+
+ To insert a new medium, click
+ Connect.
+
+
+
+
+ If using the &vmhost;'s physical CD/DVD-ROM device, first change
+ the media in the device (this may require unmounting it on the
+ &vmhost; before it can be ejected). Then choose CD-ROM
+ or DVD and select the device from the drop-down box.
+
+
+
+
+ If you are using an ISO image, choose ISO image
+ Location and select an image by clicking
+ Manage. When connecting from a remote host,
+ you may only choose images from existing storage pools.
+
+
+
+
+
+
+ Click OK to finish. The new media can now be
+ accessed in the &vmguest;.
+
+
+
+
+
+ Assigning a host PCI device to a &vmguest;
+
+
+ You can directly assign host-PCI devices to guests (PCI pass-through).
+ When the PCI device is assigned to one &vmguest;, it cannot be used on
+ the host or by another &vmguest; unless it is reassigned. A prerequisite
+ for this feature is a &vmhost; configuration as described in
+ .
+
+
+
+ Adding a PCI device with &vmm;
+
+ The following procedure describes how to assign a PCI device from the
+ host machine to a &vmguest; using &vmm;:
+
+
+
+
+ Double-click a &vmguest; entry in the &vmm; to open its console and
+ switch to the Details view with
+ ViewDetails.
+
+
+
+
+ Click Add Hardware and choose the PCI
+ Host Device category in the left panel. A list of
+ available PCI devices appears in the right part of the window.
+
+
+ Adding a PCI device
+
+
+
+
+
+
+
+
+
+
+
+
+ From the list of available PCI devices, choose the one you want to
+ pass to the guest. Confirm with Finish.
+
+
+
+
+ &slsa; 11 SP4 &kvm; guests
+
+ On a newer &qemu; machine type (pc-i440fx-2.0 or higher) with
+ &slsa; 11 SP4 &kvm; guests, the
+ acpiphp module is not
+ loaded by default in the guest. This module must be loaded to enable
+ hotplugging of disk and network devices. To load the module manually,
+ use the command modprobe acpiphp. It is also
+ possible to autoload the module by adding install acpiphp
+ /bin/true to the
+ /etc/modprobe.conf.local file.
+
+
+
+ &kvm; guests using &qemu; Q35 machine type
+
+ &kvm; guests using the &qemu; Q35 machine type have a PCI topology
+ that includes a pcie-root controller and seven
+ pcie-root-port controllers. The
+ pcie-root controller does not support hotplugging.
+ Each pcie-root-port controller supports
+ hotplugging a single PCIe device. PCI controllers cannot be
+ hotplugged, so plan accordingly and add more
+ pcie-root-ports for more than seven hotplugged
+ PCIe devices. A pcie-to-pci-bridge controller can
+ be added to support hotplugging legacy PCI devices. See
+ for more
+ information about PCI topology between &qemu; machine types.
+
+
+
+
+
+ Assigning a host USB device to a &vmguest;
+
+
+ Analogous to assigning host PCI devices (see
+ ), you can directly assign
+ host USB devices to guests. When the USB device is assigned to one
+ &vmguest;, it cannot be used on the host or by another &vmguest; unless
+ it is reassigned.
+
+
+
+ Adding a USB device with &vmm;
+
+ To assign a host USB device to &vmguest; using &vmm;, follow these
+ steps:
+
+
+
+
+ Double-click a &vmguest; entry in the &vmm; to open its console and
+ switch to the Details view with
+ ViewDetails.
+
+
+
+
+ Click Add Hardware and choose the USB
+ Host Device category in the left panel. A list of
+ available USB devices appears in the right part of the window.
+
+
+ Adding a USB device
+
+
+
+
+
+
+
+
+
+
+
+
+ From the list of available USB devices, choose the one you want to
+ pass to the guest. Confirm with Finish. The new
+ USB device appears in the left pane of the
+ Details view.
+
+
+ USB device removal
+
+ To remove the host USB device assignment, click it in the left
+ pane of the Details view and confirm with
+ Remove.
+
+
+
+
+
+
+
diff --git a/references/libvirt_configuration_virsh.xml b/references/libvirt_configuration_virsh.xml
new file mode 100644
index 000000000..8a60b4430
--- /dev/null
+++ b/references/libvirt_configuration_virsh.xml
@@ -0,0 +1,2122 @@
+
+
+ %entities;
+]>
+
+
+ Configuring virtual machines with &virsh;
+
+
+
+ You can use &virsh; to configure virtual machines (VM) on the command
+ line as an alternative to using the &vmm;. With &virsh;, you can
+ control the state of a VM, edit the configuration of a VM or even
+ migrate a VM to another host. The following sections describe how to
+ manage VMs by using &virsh;.
+
+
+
+
+ yes
+
+
+
+ Editing the VM configuration
+
+
+ The configuration of a VM is stored in an XML file in
+ /etc/libvirt/qemu/ and looks like this:
+
+
+
+ Example XML configuration file
+
+<domain type='kvm'>
+ <name>sles15</name>
+ <uuid>ab953e2f-9d16-4955-bb43-1178230ee625</uuid>
+ <memory unit='KiB'>2097152</memory>
+ <currentMemory unit='KiB'>2097152</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-q35-2.0'>hvm</type>
+ </os>
+ <features>...</features>
+ <cpu mode='custom' match='exact' check='partial'>
+ <model fallback='allow'>Skylake-Client-IBRS</model>
+ </cpu>
+ <clock>...</clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <pm>
+ <suspend-to-mem enabled='no'/>
+ <suspend-to-disk enabled='no'/>
+ </pm>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>...</disk>
+ </devices>
+ ...
+</domain>
+
+
+
+
+ To edit the configuration of a &vmguest;, check if it is offline:
+
+
+&prompt.sudo;virsh list --inactive
+
+
+ If your &vmguest; is in this list, you can safely edit its configuration:
+
+
+&prompt.sudo;virsh edit NAME_OF_VM_GUEST
+
+
+
+ Before saving the changes, &virsh; validates your input against a RelaxNG
+ schema.
+
+
+
+ Changing the machine type
+
+
+ When installing with the virt-install tool, the
+ machine type for a &vmguest; is pc-q35 by default.
+ The machine type is stored in the &vmguest;'s configuration file in the
+ type element:
+
+
+<type arch='x86_64' machine='pc-q35-2.3'>hvm</type>
+
+
+ As an example, the following procedure shows how to change this value to
+ the machine type q35. The value q35
+ is an Intel* chipset and includes
+ , supports up to 12 USB ports, and
+ has support for and
+ .
+
+
+
+
+ Changing machine type
+
+
+ Check whether your &vmguest; is inactive:
+
+&prompt.sudo;virsh list --inactive
+Id Name State
+----------------------------------------------------
+- sles15 shut off
+
+
+
+ Edit the configuration for this &vmguest;:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Replace the value of the machine
+ attribute with pc-q35-2.0 :
+
+<type arch='x86_64' machine='pc-q35-2.0'>hvm</type>
+
+
+
+ Restart the &vmguest;:
+
+&prompt.sudo;virsh start sles15
+
+
+
+ Check if the machine type has changed. Log in to the &vmguest; and
+ run the following command:
+
+&prompt.sudo;dmidecode | grep Product
+Product Name: Standard PC (Q35 + ICH9, 2009)
+
+
+
+
+ Machine type update recommendations
+
+ Whenever the &qemu; version on the host system is upgraded, for example,
+ when upgrading the &vmhost; to a new service pack, upgrade the machine
+ type of the &vmguest;s to the latest available version. To check, use
+ the command qemu-system-x86_64 -M help on the
+ &vmhost;.
+
+
+ The default machine type pc-i440fx, for example, is
+ regularly updated. If your &vmguest; still runs with a machine type of
+ pc-i440fx-1.X, we
+ strongly recommend an update to
+ pc-i440fx-2.X. This
+ allows taking advantage of the most recent updates and corrections in
+ machine definitions, and ensures better future compatibility.
+
+
+
+
+ Configuring hypervisor features
+
+
+ libvirt automatically enables a default set of
+ hypervisor features that are sufficient in most circumstances, but also
+ allows enabling and disabling features as needed.
+ Hypervisor features can be
+ configured with &virsh;. Look for the <features> element
+ in the &vmguest;'s configuration file and adjust its features as
+ required.
+
+
+ See the Hypervisor features section of the libvirt
+ Domain XML format manual at
+
+ for more information.
+
+
+
+ Configuring CPU
+
+
+ Many aspects of the virtual CPUs presented to &vmguest;s are configurable
+ with &virsh;. The number of current and maximum CPUs allocated to a
+ &vmguest; can be changed, as well as the model of the CPU and its feature
+ set. The following subsections describe how to change the common CPU
+ settings of a &vmguest;.
+
+
+
+ Configuring the number of CPUs
+
+ The number of allocated CPUs is stored in the &vmguest;'s XML
+ configuration file in /etc/libvirt/qemu/ in the
+ vcpu element:
+
+<vcpu placement='static'>1</vcpu>
+
+ In this example, the &vmguest; has only one allocated CPU. The
+ following procedure shows how to change the number of allocated CPUs
+ for the &vmguest;:
+
+
+
+
+ Check whether your &vmguest; is inactive:
+
+&prompt.sudo;virsh list --inactive
+Id Name State
+----------------------------------------------------
+- sles15 shut off
+
+
+
+ Edit the configuration for an existing &vmguest;:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Change the number of allocated CPUs:
+
+<vcpu placement='static'>2</vcpu>
+
+
+
+ Restart the &vmguest;:
+
+&prompt.sudo;virsh start sles15
+
+
+
+ Check if the number of CPUs in the VM has changed.
+
+
+&prompt.sudo;virsh vcpuinfo sles15
+VCPU: 0
+CPU: N/A
+State: N/A
+CPU time N/A
+CPU Affinity: yy
+
+VCPU: 1
+CPU: N/A
+State: N/A
+CPU time N/A
+CPU Affinity: yy
+
+
+
+
+ You can also change the number of CPUs while the &vmguest; is running.
+ CPUs can be hotplugged until the maximum number configured at &vmguest;
+ start is reached. Likewise, they can be hot-unplugged until the lower
+ limit of 1 is reached. The following example shows changing the
+ active CPU count from 2 to a predefined maximum of 4.
+
+
+
+
+ Check the current live vcpu count:
+
+&prompt.sudo;virsh vcpucount sles15 | grep live
+maximum live 4
+current live 2
+
+
+
+
+ Change the current, or active, number of CPUs to 4:
+
+&prompt.sudo;virsh setvcpus sles15 --count 4 --live
+
+
+
+ Check that the current live vcpu count is now 4:
+
+&prompt.sudo;virsh vcpucount sles15 | grep live
+maximum live 4
+current live 4
+
+
+
+
+
+
+ Configuring the CPU model
+
+ The CPU model exposed to a &vmguest; can often influence the workload
+ running within it. The default CPU model is derived from a CPU mode
+ known as host-model.
+
+<cpu mode='host-model'/>
+
+ When starting a &vmguest; with the CPU mode host-model,
+ &libvirt; copies its model of the host CPU into the &vmguest;
+ definition. The host CPU model and features copied to the &vmguest;
+ definition can be observed in the output of the virsh
+ capabilities command.
+
+
+ Another interesting CPU mode is host-passthrough.
+
+<cpu mode='host-passthrough'/>
+
+ When starting a &vmguest; with the CPU mode
+ host-passthrough, it is presented with a CPU that is
+ exactly the same as the &vmhost; CPU. This can be useful when the
+ &vmguest; workload requires CPU features not available in &libvirt;'s
+ simplified host-model CPU. The
+ host-passthrough CPU mode comes with the
+ disadvantage of reduced migration flexibility. A &vmguest; with
+ host-passthrough CPU mode can only be migrated to a
+ &vmhost; with identical hardware.
+
+
+ When using the host-passthrough CPU mode, it is
+ still possible to disable undesirable features. The following
+ configuration presents the &vmguest; with a CPU that is exactly the
+ same as the host CPU but with the vmx feature
+ disabled.
+
+
+<cpu mode='host-passthrough'>
+ <feature policy='disable' name='vmx'/>
+ </cpu>
+
+
+ The custom CPU mode is another common mode used to
+ define a normalized CPU that can be migrated throughout dissimilar
+ hosts in a cluster. For example, in a cluster with hosts containing
+ Nehalem, IvyBridge and SandyBridge CPUs, the &vmguest; can be
+ configured with a custom CPU mode that contains a
+ Nehalem CPU model.
+
+
+<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Nehalem</model>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='dca'/>
+ <feature policy='require' name='rdtscp'/>
+ <feature policy='require' name='invtsc'/>
+ </cpu>
+
+
+ For more information on &libvirt;'s CPU model and topology options, see
+ the CPU model and topology documentation at
+ .
+
+
+
+
+ Changing boot options
+
+
+ The boot menu of the &vmguest; can be found in the os element
+ and looks similar to this example:
+
+
+<os firmware='efi'>
+ <type arch='x86_64' machine='pc-q35-10.0'>hvm</type>
+ <firmware>
+ <feature enabled='yes' name='enrolled-keys'/>
+ <feature enabled='yes' name='secure-boot'/>
+ </firmware>
+ <loader readonly='yes' secure='yes' type='pflash' format='raw'>/usr/share/qemu/ovmf-x86_64-smm-ms-code.bin</loader>
+ <nvram template='/usr/share/qemu/ovmf-x86_64-smm-ms-vars.bin' templateFormat='raw' format='raw'>/var/lib/libvirt/qemu/nvram/win11_VARS.fd</nvram>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ </os>
+
+
+ In this example, two devices are available,
+ hd and cdrom .
+ The configuration also reflects the actual boot order, so the
+ hd comes before the
+ cdrom .
+
+
+
+ Changing boot order
+
+ The &vmguest;'s boot order is represented through the order of devices
+ in the XML configuration file. As the devices are interchangeable, it
+ is possible to change the boot order of the &vmguest;.
+
+
+
+
+ Open the &vmguest;'s XML configuration.
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Change the sequence of the bootable devices.
+
+
+...
+<boot dev='cdrom'/>
+<boot dev='hd'/>
+...
+
+
+
+
+ Check if the boot order was changed successfully by looking at the
+ boot menu in the BIOS of the &vmguest;.
+
+
+
+
+
+
+ Using direct kernel boot
+
+ Direct Kernel Boot allows you to boot from a kernel and initrd stored
+ on the host. Set the path to both files in the kernel and
+ initrd elements:
+
+<os>
+ ...
+ <kernel>/root/f8-i386-vmlinuz</kernel>
+ <initrd>/root/f8-i386-initrd</initrd>
+ ...
+</os>
+
+ To enable Direct Kernel Boot:
+
+
+
+
+ Open the &vmguest;'s XML configuration:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Inside the os element, add a kernel element
+ and the path to the kernel file on the host:
+
+...
+<kernel>/root/f8-i386-vmlinuz</kernel>
+...
+
+
+
+ Add an initrd element and the path to the initrd file on
+ the host:
+
+...
+<initrd>/root/f8-i386-initrd</initrd>
+...
+
+
+
+ Start your VM to boot from the new kernel:
+
+&prompt.sudo;virsh start sles15
+
+
+
+
+
+ Configuring memory allocation
+
+
+ The amount of memory allocated for the &vmguest; can also be configured
+ with &virsh;. It is stored in the memory element and defines
+ the maximum allocation of memory for the &vmguest; at boot time. The
+ optional currentMemory element defines the actual memory
+ allocated to the &vmguest;. currentMemory can be less than
+ memory, allowing for increasing (or
+ ballooning) the memory while the &vmguest; is
+ running. If currentMemory is omitted, it defaults to the same
+ value as the memory element.
+
+
+
+ You can adjust memory settings by editing the &vmguest; configuration,
+ but be aware that changes do not take place until the next boot. The
+ following steps demonstrate changing a &vmguest; to boot with 4G of
+ memory, but allow later expansion to 8G:
+
+
+
+
+
+ Open the &vmguest;'s XML configuration:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Search for the memory element and set to 8G:
+
+...
+<memory unit='KiB'>8388608</memory>
+...
+
+
+
+ If the currentMemory element does not exist, add it below
+ the memory element, or change its value to 4G:
+
+
+[...]
+<memory unit='KiB'>8388608</memory>
+<currentMemory unit='KiB'>4194304</currentMemory>
+[...]
+
+
+
+
+ Changing the memory allocation while the &vmguest; is running can be done
+ with the setmem subcommand. The following example
+ shows increasing the memory allocation to 8G:
+
+
+
+
+
+ Check &vmguest; existing memory settings:
+
+&prompt.sudo;virsh dominfo sles15 | grep memory
+Max memory: 8388608 KiB
+Used memory: 4194304 KiB
+
+
+
+
+ Change the used memory to 8G:
+
+&prompt.sudo;virsh setmem sles15 8388608
+
+
+
+ Check the updated memory settings:
+
+&prompt.sudo;virsh dominfo sles15 | grep memory
+Max memory: 8388608 KiB
+Used memory: 8388608 KiB
+
+
+
+
+
+ Large memory &vmguest;s
+
+ &vmguest;s with memory requirements of 4 TB or more must either
+ use the host-passthrough CPU mode, or explicitly
+ specify the virtual CPU address size when using
+ host-model or custom CPU modes.
+ The default virtual CPU address size may not be sufficient for memory
+ configurations of 4 TB or more. The following example shows how to
+ use the &vmhost;'s physical CPU address size when using the
+ host-model CPU mode.
+
+
+[...]
+<cpu mode='host-model' check='partial'>
+<maxphysaddr mode='passthrough'>
+</cpu>
+[...]
+
+ For more information on specifying virtual CPU address size, see the
+ maxphysaddr option in the CPU model and
+ topology documentation at
+ .
+
+
+
+
+ Adding a PCI device
+
+
+ To assign a PCI device to &vmguest; with &virsh;, follow these steps:
+
+
+
+
+
+ Identify the host PCI device to assign to the &vmguest;. In the
+ following example, we are assigning a DEC network card to the guest:
+
+&prompt.sudo;lspci -nn
+[...]
+03:07.0 Ethernet controller [0200]: Digital Equipment Corporation DECchip \
+21140 [FasterNet] [1011:0009] (rev 22)
+[...]
+
+ Write down the device ID, 03:07.0 in this example.
+
+
+
+
+ Gather detailed information about the device using virsh
+ nodedev-dumpxml ID. To get the
+ ID, replace the colon and the period in
+ the device ID (03:07.0) with underscores. Prefix
+ the result with pci_0000_:
+ pci_0000_03_07_0.
+
+&prompt.sudo;virsh nodedev-dumpxml pci_0000_03_07_0
+<device>
+ <name>pci_0000_03_07_0</name>
+ <path>/sys/devices/pci0000:00/0000:00:14.4/0000:03:07.0</path>
+ <parent>pci_0000_00_14_4</parent>
+ <driver>
+ <name>tulip</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>3</bus>
+ <slot>7</slot>
+ <function>0</function>
+ <product id='0x0009'>DECchip 21140 [FasterNet]</product>
+ <vendor id='0x1011'>Digital Equipment Corporation</vendor>
+ <numa node='0'/>
+ </capability>
+</device>
+
+ Write down the values for domain, bus and function (see the previous
+ XML code printed in bold).
+
+
+
+
+ Detach the device from the host system before attaching it to the
+ &vmguest;:
+
+&prompt.sudo;virsh nodedev-detach pci_0000_03_07_0
+ Device pci_0000_03_07_0 detached
+
+ Multi-function PCI devices
+
+ When using a multi-function PCI device that does not support FLR
+ (function level reset) or PM (power management) reset, you need to
+ detach all its functions from the &vmhost;. The whole device must
+ be reset for security reasons. libvirt
+ refuses to assign the device if one of its functions is still in
+ use by the &vmhost; or another &vmguest;.
+
+
+
+
+
+ Convert the domain, bus, slot, and function value from decimal to
+ hexadecimal. In our example, domain = 0, bus = 3, slot = 7, and
+ function = 0. Ensure that the values are inserted in the right order:
+
+&prompt.user;printf "<address domain='0x%x' bus='0x%x' slot='0x%x' function='0x%x'/>\n" 0 3 7 0
+
+ This results in:
+
+<address domain='0x0' bus='0x3' slot='0x7' function='0x0'/>
+
+
+
+ Run virsh edit on your domain, and add the
+ following device entry in the <devices>
+ section using the result from the previous step:
+
+<hostdev mode='subsystem' type='pci' managed='yes'>
+ <source>
+ <address domain='0x0' bus='0x03' slot='0x07' function='0x0'/>
+ </source>
+</hostdev>
+
+ managed compared to unmanaged
+
+ libvirt recognizes two modes for handling
+ PCI devices: they can be managed or
+ unmanaged. In the managed case,
+ libvirt handles all details of unbinding
+ the device from the existing driver if needed, resetting the
+ device, binding it to vfio-pci before
+ starting the domain, etc. When the domain is terminated or the
+ device is removed from the domain, libvirt
+ unbinds from vfio-pci and rebinds to the
+ original driver when using a managed device. If the device is
+ unmanaged, the user must ensure that all these management aspects
+ of the device are done before assigning it to a domain, and after
+ the device is no longer used by the domain.
+
+
+
+ In the example above, the managed='yes' option
+ means that the device is managed. To switch the device mode to
+ unmanaged, set managed='no' in the listing above.
+ If you do so, you need to take care of the related driver with the
+ virsh nodedev-detach and virsh
+ nodedev-reattach commands. Before starting the &vmguest;,
+ you need to detach the device from the host by running virsh
+ nodedev-detach pci_0000_03_07_0. In case the &vmguest; is
+ not running, you can make the device available for the host by
+ running virsh nodedev-reattach pci_0000_03_07_0.
+
+
+
+
+ Shut down the &vmguest; and disable &selnx; if it is running on the
+ host.
+
+&prompt.sudo;setsebool -P virt_use_sysfs 1
+
+
+
+ Start your &vmguest; to make the assigned PCI device available:
+
+&prompt.sudo;virsh start sles15
+
+
+
+
+ &slsa;11 SP4 &kvm; guests
+
+ On a newer &qemu; machine type (pc-i440fx-2.0 or higher) with &slsa; 11
+ SP4 &kvm; guests, the acpiphp
+ module is not loaded by default in the guest. This module must be
+ loaded to enable hotplugging of disk and network devices. To load the
+ module manually, use the command modprobe acpiphp.
+ It is also possible to autoload the module by adding install
+ acpiphp /bin/true to the
+ /etc/modprobe.conf.local file.
+
+
+
+
+ &kvm; guests using &qemu; Q35 machine type
+
+ &kvm; guests using the &qemu; Q35 machine type have a PCI topology that
+ includes a pcie-root controller and seven
+ pcie-root-port controllers. The
+ pcie-root controller does not support hotplugging.
+ Each pcie-root-port controller supports hotplugging
+ a single PCIe device. PCI controllers cannot be hotplugged, so plan
+ accordingly and add more pcie-root-ports to hotplug
+ more than seven PCIe devices. A pcie-to-pci-bridge
+ controller can be added to support hotplugging legacy PCI devices. See
+ for more
+ information about PCI topology between &qemu; machine types.
+
+
+
+
+ &pciback; for &zseries;
+
+ To support &zseries;, &qemu; extended PCI representation by allowing the user to
+ configure extra attributes. Two more
+ attributes—uid and
+ fid—were added to the
+ <zpci/> &libvirt; specification.
+ uid represents a user-defined identifier, while
+ fid represents the PCI function identifier. These
+ attributes are optional and if you do not specify them, they are
+ automatically generated with non-conflicting values.
+
+
+ To include zPCI attribute in your domain specification, use the
+ following example definition:
+
+
+<controller type='pci' index='0' model='pci-root'/>
+<controller type='pci' index='1' model='pci-bridge'>
+ <model name='pci-bridge'/>
+ <target chassisNr='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'>
+ <zpci uid='0x0001' fid='0x00000000'/>
+ </address>
+</controller>
+<interface type='bridge'>
+ <source bridge='virbr0'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x01' slot='0x01' function='0x0'>
+ <zpci uid='0x0007' fid='0x00000003'/>
+ </address>
+</interface>
+
+
+
+
+ Adding a USB device
+
+
+ To assign a USB device to &vmguest; using &virsh;, follow these steps:
+
+
+
+
+
+ Identify the host USB device to assign to the &vmguest;:
+
+&prompt.sudo;lsusb
+[...]
+Bus 001 Device 003: ID 0557:2221 ATEN International Co., Ltd Winbond Hermon
+[...]
+
+ Write down the vendor and product IDs. In our example, the vendor ID
+ is 0557 and the product ID is
+ 2221.
+
+
+
+
+ Run virsh edit on your domain, and add the
+ following device entry in the <devices>
+ section using the values from the previous step:
+
+<hostdev mode='subsystem' type='usb'>
+ <source startupPolicy='optional'>
+ <vendor id='0557'/>
+ <product id='2221'/>
+ </source>
+</hostdev>
+
+ Vendor/product or device's address
+
+ Instead of defining the host device with
+ vendor and
+ product IDs, you can use the
+ address element as described for host
+ PCI devices in .
+
+
+
+
+
+ Shut down the &vmguest; and disable &selnx; if it is running on the
+ host:
+
+&prompt.sudo;setsebool -P virt_use_sysfs 1
+
+
+
+ Start your &vmguest; to make the assigned USB device available:
+
+&prompt.sudo;virsh start sles15
+
+
+
+
+ Adding SR-IOV devices
+
+
+ Single Root I/O Virtualization capable
+ devices can replicate their
+ resources, so they appear as multiple devices. Each of these
+ pseudo-devices can be assigned to a &vmguest;.
+
+
+
+ SR-IOV is an industry specification that was
+ created by the Peripheral Component Interconnect Special Interest Group
+ (PCI-SIG) consortium. It introduces physical functions (PF) and virtual
+ functions (VF). PFs are full
+ functions used to manage and configure the device. PFs also can move
+ data. VFs lack the configuration and management part—they only can move
+ data and a reduced set of configuration functions. As VFs do not have all
+ functions, the host operating
+ system or the hypervisor must support
+ SR-IOV to access and initialize VFs.
+ The theoretical maximum for VFs is 256 per device (consequently the
+ maximum for a dual-port Ethernet card would be 512). In practice, this
+ maximum is much lower, since each VF consumes resources.
+
+
+
+ Requirements
+
+ The following requirements must be met to use &sriov;:
+
+
+
+
+ An &sriov; capable network card (as of
+ &productname;
+ 15,
+ only network cards support &sriov;). For more information, refer
+ to .
+
+
+
+
+ An AMD64/Intel 64 host supporting hardware virtualization (AMD-V or
+ Intel VT-x). For more information, see the section
+ Architecture Support in the article Virtualization
+ Limits and Support.
+
+
+
+
+ A chipset that supports device assignment (AMD-Vi or Intel
+ VT-d)
+
+
+
+
+ &libvirt; 0.9.10 or better
+
+
+
+
+ SR-IOV drivers must be loaded and configured
+ on the host system
+
+
+
+
+ A host configuration that meets the requirements
+
+
+
+
+ A list of the PCI addresses of the VFs assigned to &vmguest;s
+
+
+
+
+ Checking if a device is SR-IOV-capable
+
+ The information whether a device is SR-IOV-capable can be obtained
+ from its PCI descriptor by running lspci. A device
+ that supports SR-IOV reports a capability
+ similar to the following:
+
+Capabilities: [160 v1] Single Root I/O Virtualization
+
+
+ Adding an SR-IOV device at &vmguest; creation
+
+ Before adding an SR-IOV device to a &vmguest; when initially setting
+ it up, the &vmhost; already needs to be configured as described in
+ .
+
+
+
+
+
+ Loading and configuring the SR-IOV host drivers
+
+ To access and initialize VFs, an SR-IOV-capable driver needs to be
+ loaded on the host system.
+
+
+
+
+ Before loading the driver, make sure the card is properly detected
+ by running lspci. The following example shows
+ the lspci output for the dual-port Intel 82576NS
+ network card:
+
+&prompt.sudo;/sbin/lspci | grep 82576
+01:00.0 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+01:00.1 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+04:00.0 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+04:00.1 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+
+ In case the card is not detected, the hardware virtualization
+ support in the BIOS/EFI may not have been enabled. To check if
+ hardware virtualization support is enabled, look at the settings in
+ the host's BIOS.
+
+
+
+
+ Check whether the SR-IOV driver is already
+ loaded by running lsmod. In the following
+ example, a check for the igb driver (for the Intel 82576NS network
+ card) returns a result. That means the driver is already loaded. If
+ the command returns nothing, the driver is not loaded.
+
+&prompt.sudo;/sbin/lsmod | egrep "^igb "
+igb 185649 0
+
+
+
+ Skip the following step if the driver is already loaded. If the
+ SR-IOV driver is not yet loaded, the
+ non-SR-IOV driver needs to be removed first,
+ before loading the new driver. Use rmmod to
+ unload a driver. The following example unloads the
+ non-SR-IOV driver for the Intel 82576NS
+ network card:
+
+&prompt.sudo;/sbin/rmmod igbvf
+
+
+
+ Load the SR-IOV driver subsequently using
+ the modprobe command—the VF parameter
+ (max_vfs) is mandatory:
+
+&prompt.sudo;/sbin/modprobe igb max_vfs=8
+
+
+ Unsure if the following procedure is really needed.
+
+ As an alternative, you can also load the driver via SYSFS:
+
+
+
+
+ Find the PCI ID of the physical NIC by listing Ethernet devices:
+
+&prompt.sudo;lspci | grep Eth
+06:00.0 Ethernet controller: Emulex Corporation OneConnect NIC (Skyhawk) (rev 10)
+06:00.1 Ethernet controller: Emulex Corporation OneConnect NIC (Skyhawk) (rev 10)
+
+
+
+ To enable VFs, echo the number of desired VFs to load to the
+ sriov_numvfs parameter:
+
+&prompt.sudo;echo 1 > /sys/bus/pci/devices/0000:06:00.1/sriov_numvfs
+
+
+
+ Verify that the VF NIC was loaded:
+
+&prompt.sudo;lspci | grep Eth
+06:00.0 Ethernet controller: Emulex Corporation OneConnect NIC (Skyhawk) (rev 10)
+06:00.1 Ethernet controller: Emulex Corporation OneConnect NIC (Skyhawk) (rev 10)
+06:08.0 Ethernet controller: Emulex Corporation OneConnect NIC (Skyhawk) (rev 10)
+
+
+
+ Obtain the maximum number of VFs available:
+
+&prompt.sudo;lspci -vvv -s 06:00.1 | grep 'Initial VFs'
+ Initial VFs: 32, Total VFs: 32, Number of VFs: 0,
+Function Dependency Link: 01
+
+
+
+ Create a /etc/systemd/system/before.service
+ file which loads VF via SYSFS on boot:
+
+[Unit]
+Before=
+[Service]
+Type=oneshot
+RemainAfterExit=true
+ExecStart=/bin/bash -c "echo 1 > /sys/bus/pci/devices/0000:06:00.1/sriov_numvfs"
+# beware, executable is run directly, not through a shell, check the man pages
+# systemd.service and systemd.unit for full syntax
+[Install]
+# target in which to start the service
+WantedBy=multi-user.target
+#WantedBy=graphical.target
+
+
+
+ Before starting the VM, it is required to create another service
+ file (after-local.service) pointing to the
+ /etc/init.d/after.local script that detaches
+ the NIC. Otherwise the VM would fail to start:
+
+[Unit]
+Description=/etc/init.d/after.local Compatibility
+After=libvirtd.service
+Requires=libvirtd.service
+[Service]
+Type=oneshot
+ExecStart=/etc/init.d/after.local
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
+
+
+
+ Copy it to /etc/systemd/system.
+
+#! /bin/sh
+# ...
+virsh nodedev-detach pci_0000_06_08_0
+
+ Save it as /etc/init.d/after.local.
+
+
+
+
+ Reboot the machine and check if the SR-IOV driver is loaded by
+ re-running the lspci command from the first step
+ of this procedure. If the SR-IOV driver was loaded successfully you
+ should see additional lines for the VFs:
+
+01:00.0 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+01:00.1 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+01:10.0 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+01:10.1 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+01:10.2 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+[...]
+04:00.0 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+04:00.1 Ethernet controller: Intel Corporation 82576NS Gigabit Network Connection (rev 01)
+04:10.0 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+04:10.1 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+04:10.2 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01)
+[...]
+
+
+
+
+
+ Adding a VF network device to a &vmguest;
+
+ When the SR-IOV hardware is properly set up on
+ the &vmhost;, you can add VFs to &vmguest;s. To do so, you need to
+ collect specific data first.
+
+
+ Adding a VF network device to an existing &vmguest;
+
+ The following procedure uses example data. Replace it with
+ appropriate data from your setup.
+
+
+
+ Use the virsh nodedev-list command to get the
+ PCI address of the VF you want to assign and its corresponding PF.
+ Numerical values from the lspci output shown in
+ , for example,
+ 01:00.0 or 04:00.1, are
+ transformed by adding the prefix pci_0000_ and
+ by replacing colons and dots with underscores. So a PCI ID listed
+ as 04:00.0 by lspci is listed
+ as pci_0000_04_00_0 by virsh. The following
+ example lists the PCI IDs for the second port of the Intel 82576NS
+ network card:
+
+&prompt.sudo;virsh nodedev-list | grep 0000_04_
+pci_0000_04_00_0
+pci_0000_04_00_1
+pci_0000_04_10_0
+pci_0000_04_10_1
+pci_0000_04_10_2
+pci_0000_04_10_3
+pci_0000_04_10_4
+pci_0000_04_10_5
+pci_0000_04_10_6
+pci_0000_04_10_7
+pci_0000_04_11_0
+pci_0000_04_11_1
+pci_0000_04_11_2
+pci_0000_04_11_3
+pci_0000_04_11_4
+pci_0000_04_11_5
+
+ The first two entries represent the
+ PFs, whereas the other entries
+ represent the VFs.
+
+
+
+
+ Run the following virsh nodedev-dumpxml command
+ on the PCI ID of the VF you want to add:
+
+&prompt.sudo;virsh nodedev-dumpxml pci_0000_04_10_0
+<device>
+ <name>pci_0000_04_10_0</name>
+ <parent>pci_0000_00_02_0</parent>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>0</function>
+ <product id='0x10ca'>82576 Virtual Function</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
+ </capability>
+ </capability>
+</device>
+
+ The following data is needed for the next step:
+
+
+
+
+ <domain>0</domain>
+
+
+
+
+ <bus>4</bus>
+
+
+
+
+ <slot>16</slot>
+
+
+
+
+ <function>0</function>
+
+
+
+
+
+
+ Create a temporary XML file, for example,
+ /tmp/vf-interface.xml, containing the data
+ necessary to add a VF network device to an existing &vmguest;. The
+ minimal content of the file needs to look like the following:
+
+<interface type='hostdev'>
+ <source>
+ <address type='pci' domain='0' bus='11' slot='16' function='0'/>
+ </source>
+</interface>
+
+
+
+ VFs do not get a fixed MAC address; it changes every time the
+ host reboots. When adding network devices the
+ traditional way with
+ hostdev, it would require to
+ reconfigure the &vmguest;'s network device after each reboot of
+ the host, because of the MAC address change. To avoid this kind
+ of problem, &libvirt; introduced the
+ hostdev value, which sets up
+ network-specific data before assigning the
+ device.
+
+
+
+
+ Specify the data you acquired in the previous step here.
+
+
+
+
+
+
+ In case a device is already attached to the host, it cannot be
+ attached to a &vmguest;. To make it available for guests, detach it
+ from the host first:
+
+&prompt.sudo;virsh nodedev-detach pci_0000_04_10_0
+
+
+
+ Add the VF interface to an existing &vmguest;:
+
+&prompt.sudo;virsh attach-device GUEST /tmp/vf-interface.xml --OPTION
+
+ GUEST needs to be replaced by the domain
+ name, ID or UUID of the &vmguest;.
+ --OPTION can be one of the following:
+
+
+
+
+
+
+ This option always adds the device to the domain's persistent
+ XML. If the domain is running, the device is hotplugged.
+
+
+
+
+
+
+
+ This option affects the persistent XML only, even if the
+ domain is running. The device appears in the &vmguest; on
+ next boot.
+
+
+
+
+
+
+
+ This option affects a running domain only. If the domain is
+ inactive, the operation fails. The device is not persisted in
+ the XML and becomes available in the &vmguest; on next boot.
+
+
+
+
+
+
+
+ This option affects the current state of the domain. If the
+ domain is inactive, the device is added to the persistent XML
+ and becomes available on next boot. If the domain is active,
+ the device is hotplugged but not added to the persistent XML.
+
+
+
+
+
+
+
+ To detach a VF interface, use the virsh
+ detach-device command, which also takes the options
+ listed above.
+
+
+
+
+
+
+ Dynamic allocation of VFs from a pool
+
+ If you define the PCI address of a VF into a &vmguest;'s configuration
+ statically as described in
+ , it is hard to migrate
+ such guest to another host. The host must have identical hardware in
+ the same location on the PCI bus, or the &vmguest; configuration must
+ be modified before each start.
+
+
+ Another approach is to create a &libvirt; network with a device pool
+ that contains all the VFs of an SR-IOV device.
+ The &vmguest; then references this network, and each time it is
+ started, a single VF is dynamically allocated to it. When the &vmguest;
+ is stopped, the VF is returned to the pool, available for another
+ guest.
+
+
+ Defining network with pool of VFs on &vmhost;
+
+ The following example of network definition creates a pool of all VFs
+ for the SR-IOV device with its physical
+ function (PF) at the network interface eth0 on the
+ host:
+
+<network>
+ <name>passthrough</name>
+ <forward mode='hostdev' managed='yes'>
+ <pf dev='eth0'/>
+ </forward>
+ </network>
+
+ To use this network on the host, save the above code to a file, for
+ example /tmp/passthrough.xml, and execute the
+ following commands. Remember to replace eth0 with
+ the real network interface name of your SR-IOV
+ device's PF:
+
+&prompt.sudo;virsh net-define /tmp/passthrough.xml
+&prompt.sudo;virsh net-autostart passthrough
+&prompt.sudo;virsh net-start passthrough
+
+
+ Configuring &vmguest;s to use VF from the pool
+
+ The following example of &vmguest; device interface definition uses a
+ VF of the SR-IOV device from the pool created
+ in . &libvirt;
+ automatically derives the list of all VFs associated with that PF the
+ first time the guest is started.
+
+<interface type='network'>
+ <source network='passthrough'>
+</interface>
+
+ After the first &vmguest; starts that uses the network with the pool
+ of VFs, verify the list of associated VFs. Do so by running
+ virsh net-dumpxml passthrough on the host.
+
+<network connections='1'>
+ <name>passthrough</name>
+ <uuid>a6a26429-d483-d4ed-3465-4436ac786437</uuid>
+ <forward mode='hostdev' managed='yes'>
+ <pf dev='eth0'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x10' function='0x1'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x10' function='0x3'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x10' function='0x5'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x10' function='0x7'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x11' function='0x1'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x11' function='0x3'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x11' function='0x5'/>
+ </forward>
+ </network>
+
+
+
+
+ Listing attached devices
+
+
+ Although there is no mechanism in &virsh; to list all &vmhost;'s devices
+ that have already been attached to its &vmguest;s, you can list all
+ devices attached to a specific &vmguest; by running the following
+ command:
+
+
+virsh dumpxml VMGUEST_NAME | xpath -e /domain/devices/hostdev
+
+
+ For example:
+
+
+
+&prompt.sudo;virsh dumpxml sles16-clone | xpath -e /domain/devices/hostdev
+Found 1 nodes in stdin:
+-- NODE --
+<hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address domain="0x0000" bus="0x01" slot="0x00" function="0x0" />
+ </source>
+ <address type="pci" domain="0x0000" bus="0x07" slot="0x00" function="0x0" />
+</hostdev>
+
+
+
+ Listing SR-IOV devices attached via <interface type='hostdev'>
+
+ For SR-IOV devices that are attached to the &vmhost; via
+ <interface type='hostdev'>, you need to use a
+ different XPath query:
+
+virsh dumpxml VMGUEST_NAME | xpath -e /domain/devices/interface/@type
+
+
+
+ Configuring storage devices
+
+
+ Storage devices are defined within the disk element. The usual
+ disk element supports several attributes. The following two
+ attributes are the most important:
+
+
+
+
+
+ The type attribute describes the source
+ of the virtual disk device. Valid values are
+ file , block
+ , dir ,
+ network , or
+ volume .
+
+
+
+
+ The device attribute shows how the
+ disk is exposed to the &vmguest; OS. As an example, possible values
+ can include floppy ,
+ disk , cdrom
+ , and others.
+
+
+
+
+
+ The following child elements are the most important:
+
+
+
+
+
+ driver contains the driver and the bus. These are used by
+ the &vmguest; to work with the new disk device.
+
+
+
+
+ The target element contains the device name under which
+ the new disk is shown in the &vmguest;. It also contains the optional
+ bus attribute, which defines the type of bus on which the new disk
+ should operate.
+
+
+
+
+
+ The following procedure shows how to add storage devices to the
+ &vmguest;:
+
+
+
+
+
+ Edit the configuration for an existing &vmguest;:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Add a disk element inside the devices element
+ together with the attributes type and
+ device:
+
+<disk type='file' device='disk'>
+
+
+
+ Specify a driver element and use the default values:
+
+<driver name='qemu' type='qcow2'/>
+
+
+
+ Create a disk image as a source for the new virtual disk device:
+
+&prompt.sudo;qemu-img create -f qcow2 /var/lib/libvirt/images/sles15.qcow2 32G
+
+
+
+ Add the path for the disk source:
+
+<source file='/var/lib/libvirt/images/sles15.qcow2'/>
+
+
+
+ Define the target device name in the &vmguest; and the bus on which
+ the disk should work:
+
+<target dev='vda' bus='virtio'/>
+
+
+
+ Restart your VM:
+
+&prompt.sudo;virsh start sles15
+
+
+
+
+ Your new storage device should be available in the &vmguest; OS.
+
+
+
+ Configuring controller devices
+
+
+ libvirt manages controllers automatically
+ based on the type of virtual devices used by the &vmguest;. If the
+ &vmguest; contains PCI and SCSI devices, PCI and SCSI controllers are
+ created and managed automatically. libvirt also models
+ controllers that are hypervisor-specific, for example, a
+ virtio-serial controller for &kvm;. Although the
+ default controllers and their configuration are generally fine, there may
+ be use cases where controllers or their attributes need to be adjusted
+ manually.
+
+
+
+ The default of 32 is enough in most circumstances, but a &vmguest; with
+ multiple I/O devices and an I/O-intensive workload may experience
+ performance issues because of grant frame exhaustion.
+
+
+
+ See the Controllers section of the libvirt
+ Domain XML format manual at
+
+ for more information.
+
+
+
+ Configuring video devices
+
+
+ When using the Virtual Machine Manager, only the Video device model can
+ be defined. The amount of allocated VRAM or 2D/3D acceleration can only
+ be changed in the XML configuration.
+
+
+
+ Changing the amount of allocated VRAM
+
+
+
+ Edit the configuration for an existing &vmguest;:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ Change the size of the allocated VRAM:
+
+<video>
+<model type='vga' vram='65535' heads='1'>
+...
+</model>
+</video>
+
+
+
+ Check if the amount of VRAM in the VM has changed by looking at the
+ amount in the Virtual Machine Manager.
+
+
+
+
+
+
+ Changing the state of 2D/3D acceleration
+
+
+
+ Edit the configuration for an existing &vmguest;:
+
+&prompt.sudo;virsh edit sles15
+
+
+
+ To enable/disable 2D/3D acceleration, change the value of
+ accel3d and accel2d
+ accordingly:
+
+
+<video>
+ <model>
+ <acceleration accel3d='yes' accel2d='no'/>
+ </model>
+</video>
+
+
+
+ Enabling 2D/3D acceleration
+
+ Only virtio and vbox video
+ devices are capable of 2D/3D acceleration. You cannot enable it on
+ other video devices.
+
+
+
+
+
+ Configuring network devices
+
+
+ This section describes how to configure specific aspects of virtual
+ network devices by using &virsh;.
+
+
+
+ Find more details about &libvirt; network interface specification in
+ .
+
+
+
+ Scaling network performance with multiqueue virtio-net
+
+ The multiqueue virtio-net feature scales the network performance by
+ allowing the &vmguest;'s virtual CPUs to transfer packets in parallel.
+
+
+
+ To enable multiqueue virtio-net for a specific &vmguest;, edit its XML
+ configuration as described in
+ and modify its
+ network interface as follows:
+
+
+<interface type='network'>
+ [...]
+ <model type='virtio'/>
+ <driver name='vhost' queues='NUMBER_OF_QUEUES'/>
+</interface>
+
+
+
+
+ Using macvtap to share &vmhost; network interfaces
+
+
+ Macvtap provides direct attachment of a &vmguest; virtual interface to a
+ host network interface. The macvtap-based interface extends the &vmhost;
+ network interface and has its own MAC address on the same Ethernet
+ segment. Typically, this is used to make both the &vmguest; and the
+ &vmhost; show up directly on the switch that the &vmhost; is connected
+ to.
+
+
+
+ Macvtap cannot be used with a Linux bridge
+
+ Macvtap cannot be used with network interfaces already connected to a
+ Linux bridge. Before attempting to create the macvtap interface, remove
+ the interface from the bridge.
+
+
+
+
+ &vmguest; to &vmhost; communication with macvtap
+
+ When using macvtap, a &vmguest; can communicate with other &vmguest;s,
+ and with other external hosts on the network. But it cannot communicate
+ with the &vmhost; on which the &vmguest; runs. This is the defined
+ behavior of macvtap, because of the way the &vmhost;'s physical
+ Ethernet is attached to the macvtap bridge. Traffic from the &vmguest;
+ into that bridge that is forwarded to the physical interface cannot be
+ bounced back up to the &vmhost;'s IP stack. Similarly, traffic from the
+ &vmhost;'s IP stack that is sent to the physical interface cannot be
+ bounced back up to the macvtap bridge for forwarding to the &vmguest;.
+
+
+
+
+ Virtual network interfaces based on macvtap are supported by libvirt by
+ specifying an interface type of direct. For example:
+
+
+<interface type='direct'>
+ <mac address='aa:bb:cc:dd:ee:ff'/>
+ <source dev='eth0' mode='bridge'/>
+ <model type='virtio'/>
+ </interface>
+
+
+ The operation mode of the macvtap device can be controlled with the
+ mode attribute. The following list shows its possible
+ values and a description for each:
+
+
+
+
+
+ vepa: all &vmguest; packets are sent to an
+ external bridge. Packets whose destination is a &vmguest; on the same
+ &vmhost; as where the packet originates from are sent back to the
+ &vmhost; by the VEPA capable bridge (today's bridges are typically
+ not VEPA capable).
+
+
+
+
+ bridge: packets whose destination is on the same
+ &vmhost; as where they originate from are directly delivered to the
+ target macvtap device. Both origin and destination devices need to be
+ in bridge mode for direct delivery. If either
+ of them is in vepa mode, a VEPA capable bridge is
+ required.
+
+
+
+
+ private: all packets are sent to the external
+ bridge and delivered to a target &vmguest; on the same &vmhost; if
+ they are sent through an external router or gateway and that device
+ sends them back to the &vmhost;. This procedure is followed if either
+ the source or destination device is in private mode.
+
+
+
+
+ passthrough: a special mode that gives more power
+ to the network interface. All packets are forwarded to the interface,
+ allowing virtio &vmguest;s to change the MAC address or set
+ promiscuous mode to bridge the interface or create VLAN interfaces on
+ top of it. A network interface is not shareable in
+ passthrough mode. Assigning an interface to a
+ &vmguest; disconnects it from the &vmhost;. For this reason SR-IOV
+ virtual functions are often assigned to the &vmguest; in
+ passthrough mode.
+
+
+
+
+
+ Disabling a memory balloon device
+
+
+ Memory Balloon has become a default option for &kvm;. The device is added to
+ the &vmguest; explicitly, so you do not need to add this element in the
+ &vmguest;'s XML configuration. To disable Memory Balloon in the &vmguest;
+ for any reason, set model='none' as shown below:
+
+
+<devices>
+ <memballoon model='none'/>
+</devices>
+
+
+ Configuring multiple monitors (dual head)
+
+
+ &libvirt; supports a dual head configuration to display the video output
+ of the &vmguest; on multiple monitors.
+
+
+
+ Configuring dual head
+
+
+ While the virtual machine is running, verify that the
+ xf86-video-qxl package is installed in the
+ &vmguest;:
+
+&prompt.user;rpm -q xf86-video-qxl
+
+
+
+ Shut down the &vmguest; and start editing its configuration XML as
+ described in .
+
+
+
+
+ Verify that the model of the virtual graphics card is
+ qxl:
+
+
+<video>
+ <model type='qxl' ... />
+
+
+
+
+ Increase the parameter in the graphics card
+ model specification from the default 1 to
+ 2, for example:
+
+
+<video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='2' primary='yes'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
+</video>
+
+
+
+
+ Configure the virtual machine to use the Spice display instead of
+ VNC:
+
+
+<graphics type='spice' port='5916' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+</graphics>
+
+
+
+
+ Start the virtual machine and connect to its display with
+ virt-viewer, for example:
+
+&prompt.user;virt-viewer --connect qemu+ssh://USER@VM_HOST/system
+
+
+
+ From the list of VMs, select the one whose configuration you have
+ modified and confirm with Connect.
+
+
+
+
+ After the graphical subsystem (Xorg) loads in the &vmguest;, select
+ ViewDisplaysDisplay
+ 2 to open a new window with the second
+ monitor's output.
+
+
+
+
+
+ Crypto adapter pass-through to &kvm; guests on &zseries;
+
+
+ Introduction
+
+ &zseries; machines include cryptographic hardware with useful functions
+ such as random number generation, digital signature generation, or
+ encryption. &kvm; allows dedicating these crypto adapters to guests as
+ pass-through devices. This means that the hypervisor cannot observe
+ communications between the guest and the device.
+
+
+
+
+ What is covered
+
+ This section describes how to dedicate a crypto adapter and domains on
+ an &zseries; host to a &kvm; guest. The procedure includes the
+ following basic steps:
+
+
+
+
+ Mask the crypto adapter and domains from the default driver on the
+ host.
+
+
+
+
+ Load the vfio-ap driver.
+
+
+
+
+ Assign the crypto adapter and domains to the
+ vfio-ap driver.
+
+
+
+
+ Configure the guest to use the crypto adapter.
+
+
+
+
+
+
+ Requirements
+
+
+
+ You need to have the &qemu; / &libvirt; virtualization environment
+ correctly installed and functional.
+
+
+
+
+ The vfio_ap and vfio_mdev
+ modules for the running kernel need to be available on the host
+ operating system.
+
+
+
+
+
+
+ Dedicate a crypto adapter to a &kvm; host
+
+
+
+ Verify that the vfio_ap and
+ vfio_mdev kernel modules are loaded on the host:
+
+&prompt.user;lsmod | grep vfio_
+
+ If any of them is not listed, load it manually, for example:
+
+&prompt.sudo;modprobe vfio_mdev
+
+
+
+ Create a new MDEV device on the host and verify that it was added:
+
+
+uuid=$(uuidgen)
+$ echo ${uuid} | sudo tee /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
+dmesg | tail
+[...]
+[272197.818811] iommu: Adding device 24f952b3-03d1-4df2-9967-0d5f7d63d5f2 to group 0
+[272197.818815] vfio_mdev 24f952b3-03d1-4df2-9967-0d5f7d63d5f2: MDEV: group_id = 0
+
+
+
+
+ Identify the device on the host's logical partition that you intend
+ to dedicate to a &kvm; guest:
+
+&prompt.user;ls -l /sys/bus/ap/devices/
+[...]
+lrwxrwxrwx 1 root root 0 Nov 23 03:29 00.0016 -> ../../../devices/ap/card00/00.0016/
+lrwxrwxrwx 1 root root 0 Nov 23 03:29 card00 -> ../../../devices/ap/card00/
+
+
+ In this example, it is card 0 queue
+ 16. To match the Hardware Management Console
+ (HMC) configuration, you need to convert from 16
+ hexadecimal to 22 decimal.
+
+
+
+
+ Mask the adapter from the zcrypt use:
+
+
+&prompt.user;lszcrypt
+CARD.DOMAIN TYPE MODE STATUS REQUEST_CNT
+-------------------------------------------------
+00 CEX5C CCA-Coproc online 5
+00.0016 CEX5C CCA-Coproc online 5
+
+
+ Mask the adapter:
+
+
+&prompt.user;cat /sys/bus/ap/apmask
+0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+echo -0x0 | sudo tee /sys/bus/ap/apmask
+0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+
+ Mask the domain:
+
+
+&prompt.user;cat /sys/bus/ap/aqmask
+0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+echo -0x0 | sudo tee /sys/bus/ap/aqmask
+0xfffffdffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+
+
+
+ Assign adapter 0 and domain 16 (22 decimal) to
+ vfio-ap:
+
+
+&prompt.sudo;echo +0x0 > /sys/devices/vfio_ap/matrix/${uuid}/assign_adapter
+&prompt.user;echo +0x16 | sudo tee /sys/devices/vfio_ap/matrix/${uuid}/assign_domain
+&prompt.user;echo +0x16 | sudo tee /sys/devices/vfio_ap/matrix/${uuid}/assign_control_domain
+
+
+
+
+ Verify the matrix that you have configured:
+
+
+&prompt.user;cat /sys/devices/vfio_ap/matrix/${uuid}/matrix
+00.0016
+
+
+
+
+ Either create a new VM
+ and wait until it is initialized, or use an existing VM. In both
+ cases, make sure the VM is shut down.
+
+
+
+
+ Change its configuration to use the MDEV device:
+
+
+&prompt.sudo;virsh edit VM_NAME
+[...]
+<hostdev mode='subsystem' type='mdev' model='vfio-ap'>
+ <source>
+ <address uuid='24f952b3-03d1-4df2-9967-0d5f7d63d5f2'/>
+ </source>
+</hostdev>
+[...]
+
+
+
+
+ Restart the VM:
+
+&prompt.sudo;virsh reboot VM_NAME
+
+
+
+ Log in to the guest and verify that the adapter is present:
+
+
+&prompt.user;lszcrypt
+CARD.DOMAIN TYPE MODE STATUS REQUEST_CNT
+-------------------------------------------------
+00 CEX5C CCA-Coproc online 1
+00.0016 CEX5C CCA-Coproc online 1
+
+
+
+
+
+
+ Further reading
+
+
+
+
+ The vfio_ap architecture is detailed in
+ .
+
+
+
+
+ A general outline together with a detailed procedure is described
+ in
+ .
+
+
+
+
+ The architecture of VFIO Mediated devices (MDEVs) is detailed in
+ .
+
+
+
+
+
+
diff --git a/references/libvirt_connect.xml b/references/libvirt_connect.xml
new file mode 100644
index 000000000..2156e5eca
--- /dev/null
+++ b/references/libvirt_connect.xml
@@ -0,0 +1,1413 @@
+
+
+%entities;
+]>
+
+
+ Connecting and authorizing
+
+
+
+ yes
+
+
+
+ Managing several &vmhost;s, each hosting multiple &vmguest;s, quickly
+ becomes difficult. One benefit of &libvirt; is the ability to connect to
+ several &vmhost;s at once, providing a single interface to manage all
+ &vmguest;s and to connect to their graphical console.
+
+
+ To ensure only authorized users can connect, &libvirt; offers several
+ connection types (via TLS, SSH, Unix sockets, and TCP) that can be combined
+ with different authorization mechanisms (socket, &pk;, SASL and Kerberos).
+
+
+ Authentication
+
+
+ The power to manage &vmguest;s and to access their graphical console is
+ something that should be restricted to a well-defined circle of persons.
+ To achieve this goal, you can use the following authentication techniques
+ on the &vmhost;:
+
+
+
+
+
+ Access control for Unix sockets with permissions and group ownership.
+ This method is available for &libvirtd; connections only.
+
+
+
+
+ Access control for Unix sockets with &pk;. This method is available
+ for local &libvirtd; connections only.
+
+
+
+
+ User name and password authentication with SASL (Simple
+ Authentication and Security Layer). This method is available for
+ both &libvirtd; and VNC connections. Using SASL does not require
+ real user accounts on the server, since it uses its own database to
+ store user names and passwords. Connections authenticated with SASL
+ are encrypted.
+
+
+
+
+ Kerberos authentication. This method, available for &libvirtd;
+ connections only, is not covered in this manual. Refer to
+
+ for details.
+
+
+
+
+ Single password authentication. This method is available for VNC
+ connections only.
+
+
+
+
+
+ Authentication for &libvirtd; and VNC needs to be configured separately
+
+ Access to the &vmguest;'s management functions (via &libvirtd;) and to
+ its graphical console always needs to be configured separately. When
+ restricting access to the management tools, these restrictions do
+ not automatically apply to VNC connections.
+
+
+
+
+ When accessing &vmguest;s from remote via TLS/SSL connections, access can
+ be indirectly controlled on each client by restricting read permissions
+ to the certificate's key file to a certain group. See
+ for details.
+
+
+
+ &libvirtd; authentication
+
+ &libvirtd; authentication is configured in
+ /etc/libvirt/libvirtd.conf. The configuration made
+ here applies to all &libvirt; tools such as the &vmm; or
+ virsh.
+
+
+ &libvirt; offers two sockets: a read-only socket for monitoring
+ purposes and a read-write socket to be used for management operations.
+ Access to both sockets can be configured independently. By default,
+ both sockets are owned by root.root. Default access
+ permissions on the read-write socket are restricted to the user
+ &rootuser; (0700) and fully open on the read-only
+ socket (0777).
+
+
+ The following instructions describe how to configure access permissions
+ for the read-write socket. The same instructions also apply to the
+ read-only socket. All configuration steps need to be carried out on the
+ &vmhost;.
+
+
+ Default authentication settings on &productname;
+
+ The default authentication method on &productname; is access control
+ for Unix sockets. Only the user &rootuser; may authenticate. When
+ accessing the &libvirt; tools as a non-root user directly on the
+ &vmhost;, you need to provide the &rootuser; password through &pk;
+ once. You are then granted access for the current and for future
+ sessions.
+
+
+ Alternatively, you can configure &libvirt; to allow
+ system access to non-privileged users. See
+ for details.
+
+
+
+ Recommended authorization methods
+
+ Local connections
+
+
+
+
+
+
+
+
+
+
+
+
+ Remote tunnel over SSH
+
+
+
+
+
+
+
+ Remote TLS/SSL connection
+
+
+
+
+
+
+
+ none (access controlled on the client side by restricting
+ access to the certificates)
+
+
+
+
+
+
+
+ Access control for Unix sockets with permissions and group ownership
+
+ To grant access for non-&rootuser; accounts, configure the sockets to
+ be owned and accessible by a certain group
+ (libvirt in the following
+ example). This authentication method can be used for local and remote
+ SSH connections.
+
+
+
+
+ In case it does not exist, create the group that should own the
+ socket:
+
+ &prompt.sudo;groupadd libvirt
+
+ Group needs to exist
+
+ The group must exist before restarting &libvirtd;. If not, the
+ restart fails.
+
+
+
+
+
+ Add the desired users to the group:
+
+ &prompt.sudo;usermod --append --groups libvirt &exampleuser_plain;
+
+
+
+ Change the configuration in
+ /etc/libvirt/libvirtd.conf as follows:
+
+ unix_sock_group = "libvirt"
+ unix_sock_rw_perms = "0770"
+ auth_unix_rw = "none"
+
+
+
+ Group ownership is set to the group libvirt.
+
+
+
+
+ Sets the access permissions for the socket
+ (srwxrwx---).
+
+
+
+
+ Disables other authentication methods (&pk; or SASL). Access
+ is solely controlled by the socket permissions.
+
+
+
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+
+ Local access control for Unix sockets with &pk;
+
+ Access control for Unix sockets with &pk; is the default
+ authentication method on &productname; for non-remote connections.
+ Therefore, no &libvirt; configuration changes are needed. With &pk;
+ authorization enabled, permissions on both sockets default to
+ 0777 and each application trying to access a
+ socket needs to authenticate via &pk;.
+
+
+ &pk; authentication for local connections only
+
+ Authentication with &pk; can only be used for local connections on
+ the &vmhost; itself, since &pk; does not handle remote
+ authentication.
+
+
+
+
+ Two policies for accessing &libvirt;'s sockets exist:
+
+
+
+
+ org.libvirt.unix.monitor: accessing the
+ read-only socket
+
+
+
+
+ org.libvirt.unix.manage: accessing the
+ read-write socket
+
+
+
+
+ By default, the policy for accessing the read-write socket is to
+ authenticate with the &rootuser; password once and grant the
+ privilege for the current and for future sessions.
+
+
+ To grant users access to a socket without having to provide the
+ &rootuser; password, you need to create a rule in
+ /etc/polkit-1/rules.d. Create the file
+ /etc/polkit-1/rules.d/10-grant-libvirt with the
+ following content to grant access to the read-write socket to all
+ members of the group
+ libvirt:
+
+ polkit.addRule(function(action, subject) {
+ if (action.id == "org.libvirt.unix.manage" && subject.isInGroup("libvirt")) {
+ return polkit.Result.YES;
+ }
+ });
+
+
+ User name and password authentication with SASL
+
+ SASL provides user name and password authentication and data
+ encryption (digest-md5, by default). Since SASL maintains its own
+ user database, the users do not need to exist on the &vmhost;. SASL
+ is required by TCP connections and on top of TLS/SSL connections.
+
+
+ Plain TCP and SASL with digest-md5 encryption
+
+ Using digest-md5 encryption on an otherwise not encrypted TCP
+ connection does not provide enough security for production
+ environments. It is recommended to only use it in testing
+ environments.
+
+
+
+ SASL authentication on top of TLS/SSL
+
+ Access from remote TLS/SSL connections can be indirectly controlled
+ on the client side by restricting access to
+ the certificate's key file. However, this may prove error-prone
+ when dealing with many clients. Using SASL with TLS adds security
+ by additionally controlling access on the server side.
+
+
+
+ To configure SASL authentication, proceed as follows:
+
+
+
+
+ Change the configuration in
+ /etc/libvirt/libvirtd.conf as follows:
+
+
+
+
+ To enable SASL for TCP connections:
+
+ auth_tcp = "sasl"
+
+
+
+ To enable SASL for TLS/SSL connections:
+
+ auth_tls = "sasl"
+
+
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+ The libvirt SASL configuration file is located at
+ /etc/sasl2/libvirtd.conf. Normally, there is
+ no need to change the defaults. However, if using SASL on top of
+ TLS, you may turn off session encryption to avoid additional
+ overhead (TLS connections are already encrypted) by commenting
+ the line setting the mech_list parameter. Only
+ do this for TLS/SASL. For TCP connections, this parameter must be
+ set to digest-md5.
+
+ #mech_list: digest-md5
+
+
+
+ By default, no SASL users are configured, so no logins are
+ possible. Use the following commands to manage users:
+
+
+
+ Add the user &exampleuser;
+
+ saslpasswd2 -a libvirt &exampleuser_plain;
+
+
+
+ Delete the user &exampleuser;
+
+ saslpasswd2 -a libvirt -d &exampleuser_plain;
+
+
+
+ List existing users
+
+ sasldblistusers2 -f /etc/libvirt/passwd.db
+
+
+
+
+
+
+ virsh and SASL authentication
+
+ When using SASL authentication, you are prompted for a user name
+ and password every time you issue a virsh
+ command. Avoid this by using virsh in shell
+ mode.
+
+
+
+
+
+
+ VNC authentication
+
+ Since access to the graphical console of a &vmguest; is not controlled
+ by &libvirt;, but by the specific hypervisor, it is always
+ necessary to additionally configure VNC authentication. The main
+ configuration file is
+ /etc/libvirt/<hypervisor>.conf. This section
+ describes the &qemu;/&kvm; hypervisor, so the target configuration file
+ is /etc/libvirt/qemu.conf.
+
+
+ Two authentication types are available: SASL and single-password
+ authentication. If you are using SASL for &libvirt; authentication, it
+ is strongly recommended to use it for VNC authentication as
+ well—it is possible to share the same database.
+
+
+ A third method to restrict access to the &vmguest; is to enable the use
+ of TLS encryption on the VNC server. This requires the VNC clients to
+ have access to x509 client certificates. By restricting access to these
+ certificates, access can indirectly be controlled on the client side.
+ Refer to
+ for details.
+
+
+ User name and password authentication with SASL
+
+ SASL provides user name and password authentication and data
+ encryption. Since SASL maintains its own user database, the users do
+ not need to exist on the &vmhost;. As with SASL authentication for
+ &libvirt;, you may use SASL on top of TLS/SSL connections. Refer to
+ for
+ details on configuring these connections.
+
+
+ To configure SASL authentication for VNC, proceed as follows:
+
+
+
+
+ Create a SASL configuration file. It is recommended to use the
+ existing &libvirt; file. If you have already configured SASL for
+ &libvirt; and are planning to use the same settings, including the
+ same user name and password database, a simple link is suitable:
+
+ &prompt.sudo;ln -s /etc/sasl2/libvirt.conf /etc/sasl2/qemu.conf
+
+ If you are setting up SASL for VNC only or you are planning to use a
+ different configuration than for &libvirt;, copy the existing
+ file to use as a template:
+
+ &prompt.sudo;cp /etc/sasl2/libvirt.conf /etc/sasl2/qemu.conf
+
+ Then edit it according to your needs.
+
+
+
+
+ Change the configuration in
+ /etc/libvirt/qemu.conf as follows:
+
+ vnc_listen = "0.0.0.0"
+ vnc_sasl = 1
+ sasldb_path: /etc/libvirt/qemu_passwd.db
+
+ The first parameter enables VNC to listen on all public
+ interfaces (rather than to the local host only), and the second
+ parameter enables SASL authentication.
+
+
+
+
+ By default, no SASL users are configured, so no logins are
+ possible. Use the following commands to manage users:
+
+
+
+ Add the user &exampleuser;
+
+ &prompt.user;saslpasswd2 -f /etc/libvirt/qemu_passwd.db -a qemu &exampleuser_plain;
+
+
+
+ Delete the user &exampleuser;
+
+ &prompt.user;saslpasswd2 -f /etc/libvirt/qemu_passwd.db -a qemu -d &exampleuser_plain;
+
+
+
+ List existing users
+
+ &prompt.user;sasldblistusers2 -f /etc/libvirt/qemu_passwd.db
+
+
+
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+ Restart all &vmguest;s that have been running before changing the
+ configuration. &vmguest;s that have not been restarted cannot use
+ SASL authentication for VNC connects.
+
+
+
+
+ Supported VNC viewers
+
+ SASL authentication is currently supported by &vmm; and
+ virt-viewer. Both viewers also support
+ TLS/SSL connections.
+
+
+
+
+ Single password authentication
+
+ Access to the VNC server may also be controlled by setting a VNC
+ password. You can either set a global password for all &vmguest;s or
+ set individual passwords for each guest. The latter requires editing
+ the &vmguest;'s configuration files.
+
+
+ Always set a global password
+
+ If you are using single password authentication, it is good
+ practice to set a global password even if setting passwords for
+ each &vmguest;. This protects your virtual machines with a
+ fallback password if you forget to set a per-machine
+ password. The global password is only used if no other password is
+ set for the machine.
+
+
+
+ Setting a global VNC password
+
+
+ Change the configuration in
+ /etc/libvirt/qemu.conf as follows:
+
+ vnc_listen = "0.0.0.0"
+ vnc_password = "PASSWORD"
+
+ The first parameter enables VNC to listen on all public
+ interfaces (rather than to the local host only), and the second
+ parameter sets the password. The maximum length of the password
+ is eight characters.
+
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+ Restart all &vmguest;s that have been running before changing the
+ configuration. &vmguest;s that have not been restarted cannot use
+ password authentication for VNC connects.
+
+
+
+
+ Setting a &vmguest; specific VNC password
+
+
+ Change the configuration in
+ /etc/libvirt/qemu.conf as follows to enable
+ VNC to listen on all public interfaces (rather than to the local
+ host only).
+
+ vnc_listen = "0.0.0.0"
+
+
+
+ Open the &vmguest;'s XML configuration file in an editor. Replace
+ VM_NAME in the following example with
+ the name of the &vmguest;. The editor that is used defaults to
+ $EDITOR. If that variable is not set,
+ vi is used.
+
+ &prompt.user;virsh edit VM_NAME
+
+
+
+ Search for the element <graphics> with
+ the attribute type='vnc', for example:
+
+ <graphics type='vnc' port='-1' autoport='yes'/>
+
+
+
+ Add the
+ passwd=PASSWORD
+ attribute, save the file and exit the editor. The maximum length
+ of the password is eight characters.
+
+ <graphics type='vnc' port='-1' autoport='yes' passwd='PASSWORD'/>
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+ Restart all &vmguest;s that have been running before changing the
+ configuration. &vmguest;s that have not been restarted cannot use
+ password authentication for VNC connects.
+
+
+
+
+ Security of the VNC protocol
+
+ The VNC protocol is not considered to be safe. Although the
+ password is sent encrypted, it may be vulnerable when an attacker
+ can sniff both the encrypted password and the encryption key.
+ Therefore, it is recommended to use VNC with TLS/SSL or tunneled
+ over SSH. virt-viewer, and &vmm;
+ support both methods.
+
+
+
+
+
+
+ Connecting to a &vmhost;
+
+
+ To connect to a hypervisor with &libvirt;, you need to specify a uniform
+ resource identifier (URI). This URI is needed with
+ virsh and virt-viewer (except when
+ working as &rootuser; on the &vmhost;) and is optional for the &vmm;.
+ Although the latter can be called with a connection parameter (for
+ example, virt-manager -c qemu:///system), it also
+ offers a graphical interface to create connection URIs. See
+ for details.
+
+
+ HYPERVISOR+PROTOCOL://USER@REMOTE/CONNECTION_TYPE
+
+
+
+
+ Specify the hypervisor. &productname; currently supports the
+ following hypervisors: (testing purposes),
+ (&kvm;) option. This
+ parameter is mandatory.
+
+
+
+
+ When connecting to a remote host, specify the protocol here. It can
+ be one of: (connection via SSH tunnel),
+ (TCP connection with SASL/Kerberos
+ authentication), (TLS/SSL encrypted connection
+ with authentication via x509 certificates).
+
+
+
+
+ When connecting to a remote host, specify the user name and the
+ remote host name. If no user name is specified, the user name that
+ has called the command ($USER) is used. See below for
+ more information. For TLS connections, the host name needs to be
+ specified exactly as in the x509 certificate.
+
+
+
+
+ When connecting to the &qemu;/&kvm; hypervisor,
+ two connection types are accepted: for full
+ access rights, or for restricted access.
+ Since access is not supported on
+ &productname;, this documentation focuses on
+ access.
+
+
+
+
+
+ Example hypervisor connection URIs
+
+
+
+
+ Connect to the local testing hypervisor.
+
+
+
+
+
+
+
+ Connect to the &qemu; hypervisor on the remote host
+ &wsIVname;. The connection is established via an SSH tunnel.
+
+
+
+
+
+
+
+ Connect to the &qemu; hypervisor on the remote host
+ &wsIVname;. The connection is established using TLS/SSL.
+
+
+
+
+
+
+ For more details and examples, refer to the &libvirt; documentation at
+ .
+
+
+
+ User names in URIs
+
+ A user name needs to be specified when using Unix socket authentication
+ (regardless of whether using the user/password authentication scheme or
+ &pk;). This applies to all SSH and local connections.
+
+
+ There is no need to specify a user name when using SASL authentication
+ (for TCP or TLS connections) or when doing no additional server-side
+ authentication for TLS connections. With SASL, the user name is not
+ evaluated—you are prompted for an SASL user/password combination
+ in any case.
+
+
+
+
+
+
+ system access for non-privileged users
+
+ As mentioned above, a connection to the &qemu; hypervisor can be
+ established using two different protocols: session
+ and system. A session connection is
+ spawned with the same privileges as the client program. Such a
+ connection is intended for desktop virtualization, since it is
+ restricted, for example, no USB/PCI device assignments, no virtual
+ network setup, limited remote access to &libvirtd;.
+
+
+ The system connection intended for server virtualization
+ has no functional restrictions but is, by default, only accessible by
+ &rootuser;. However, with the addition of the DAC (Discretionary Access
+ Control) driver to &libvirt;, it is now possible to grant non-privileged
+ users system access. To grant system
+ access to the user &exampleuser;, proceed as follows:
+
+
+ Granting system access to a regular user
+
+
+ Enable access via Unix sockets, as described in
+ .
+ In that example, access to libvirt is granted to all members of the
+ group libvirt and
+ &exampleuser; made a member of this group. This ensures that
+ &exampleuser; can connect using virsh or &vmm;.
+
+
+
+
+ Edit /etc/libvirt/qemu.conf and change the
+ configuration as follows:
+
+ user = "tux"
+ group = "libvirt"
+ dynamic_ownership = 1
+
+ This ensures that the &vmguest;s are started by &exampleuser; and
+ that resources bound to the guest, for example, virtual disks, can
+ be accessed and modified by &exampleuser;.
+
+
+
+
+ Make &exampleuser; a member of the group
+ kvm:
+
+ &prompt.sudo;usermod --append --groups kvm &exampleuser_plain;
+
+ This step is needed to grant access to
+ /dev/kvm, which is required to start
+ &vmguest;s.
+
+
+
+
+ Restart &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+
+
+ Managing connections with &vmm;
+
+ The &vmm; uses a Connection for every &vmhost; it
+ manages. Each connection contains all &vmguest;s on the respective
+ host. By default, a connection to the local host is already configured
+ and connected.
+
+
+ All configured connections are displayed in the &vmm; main window.
+ Active connections are marked with a small triangle, which you can
+ click to fold or unfold the list of &vmguest;s for this connection.
+
+
+ Inactive connections are listed gray and are marked with Not
+ Connected. Either double-click or right-click it and choose
+ Connect from the context menu. You can also
+ Delete an existing connection from this menu.
+
+
+ Editing existing connections
+
+ It is not possible to edit an existing connection. To change a
+ connection, create a new one with the desired parameters and delete
+ the old one.
+
+
+
+ To add a new connection in the &vmm;, proceed as follows:
+
+
+
+
+ Choose FileAdd
+ Connection
+
+
+
+
+ Choose the host's Hypervisor
+ (&qemu;/KVM)
+
+
+
+
+ To set up a remote connection, choose Connect to remote
+ host. For more information, see
+ .
+
+
+ In case of a remote connection, specify the
+ Hostname of the remote machine in the format
+ USERNAME@REMOTE
+ _HOST.
+
+
+ Specifying a user name
+
+ There is no need to specify a user name for TCP and TLS
+ connections: in these cases, it is not evaluated. However, for
+ SSH connections, specifying a user name is necessary when you
+ want to connect as a user other than
+ root.
+
+
+
+
+
+ If you do not want the connection to be automatically started when
+ starting the &vmm;, deactivate Autoconnect.
+
+
+
+
+ Finish the configuration by clicking Connect.
+
+
+
+
+
+
+ Configuring remote connections
+
+
+ A major benefit of &libvirt; is the ability to manage &vmguest;s on
+ different remote hosts from a central location. This section gives
+ detailed instructions on how to configure server and client to allow
+ remote connections.
+
+
+
+ Remote tunnel over SSH (qemu+ssh)
+
+ Enabling a remote connection that is tunneled over SSH on the &vmhost;
+ only requires the ability to accept SSH connections. Make sure the SSH
+ daemon is started (systemctl status sshd) and that
+ the ports for service SSH are
+ opened in the firewall.
+
+
+ User authentication for SSH connections can be done using traditional
+ file user/group ownership and permissions as described in
+ .
+ Connecting as user &exampleuser_plain;
+ (qemu+ssh://&exampleuser_plain;@&wsIVname;/system) works
+ out of the box and does not require additional configuration on the
+ &libvirt; side.
+
+
+ When connecting via SSH
+ qemu+ssh://USER@SYSTEM
+ you need to provide the password for USER.
+ This can be avoided by copying your public key to
+ ~USER/.ssh/authorized_keys
+ on the &vmhost;.
+
+
+
+
+ Remote TLS/SSL connection with x509 certificate (qemu+tls)
+
+ Using TCP connections with TLS/SSL encryption and authentication via
+ x509 certificates is much more complicated to set up than SSH, but it
+ is a lot more scalable. Use this method if you need to manage several
+ &vmhost;s with a varying number of administrators.
+
+
+ Basic concept
+
+ TLS (Transport Layer Security) encrypts the communication between two
+ computers by using certificates. The computer starting the connection
+ is always considered the client, using a client
+ certificate, while the receiving computer is always
+ considered the server, using a server
+ certificate. This scenario applies, for example, if you
+ manage your &vmhost;s from a central desktop.
+
+
+ If connections are initiated from both computers, each needs to have
+ a client and a server certificate. This is the
+ case, for example, if you migrate a &vmguest; from one host to
+ another.
+
+
+ Each x509 certificate has a matching private key file. Only the
+ combination of certificate and private key file can identify itself
+ correctly. To assure that a certificate was issued by the assumed
+ owner, it is signed and issued by a central certificate called
+ certificate authority (CA). Both the client and the server
+ certificates must be issued by the same CA.
+
+
+ User authentication
+
+ Using a remote TLS/SSL connection only ensures that two computers
+ are allowed to communicate in a certain direction. Restricting
+ access to certain users can indirectly be achieved on the client
+ side by restricting access to the certificates. For more
+ information, see
+ .
+
+
+ &libvirt; also supports user authentication on the server with
+ SASL. For more information, see
+ .
+
+
+
+
+ Configuring the &vmhost;
+
+ The &vmhost; is the machine receiving connections. Therefore, the
+ server certificates need to be installed. The CA
+ certificate needs to be installed, too. When the certificates are in
+ place, TLS support can be turned on for &libvirt;.
+
+
+
+
+ Create the server certificate and export it together with the
+ respective CA certificate.
+
+
+
+
+ Create the following directories on the &vmhost;:
+
+ &prompt.sudo;mkdir -p /etc/pki/CA/ /etc/pki/libvirt/private/
+
+ Install the certificates as follows:
+
+ &prompt.sudo;/etc/pki/CA/cacert.pem
+ &prompt.sudo;/etc/pki/libvirt/servercert.pem
+ &prompt.sudo;/etc/pki/libvirt/private/serverkey.pem
+
+ Restrict access to certificates
+
+ Make sure to restrict access to certificates, as explained in
+ .
+
+
+
+
+
+ Enable TLS support by enabling the relevant socket and restarting
+ &libvirtd;:
+
+
+ &prompt.sudo;systemctl stop libvirtd.service
+ &prompt.sudo;systemctl enable --now libvirtd-tls.socket
+ &prompt.sudo;systemctl start libvirtd.service
+
+
+
+
+ By default, &libvirt; uses the TCP port 16514 for accepting
+ secure TLS connections. Open this port in the firewall.
+
+
+
+
+ Restarting &libvirtd; with TLS enabled
+
+ If you enable TLS for &libvirt;, the server certificates need to be
+ in place, otherwise restarting &libvirtd; fails. You also need to
+ restart &libvirtd; in case you change the certificates.
+
+
+
+
+ Configuring the client and testing the setup
+
+ The client is the machine initiating connections. Therefore, the
+ client certificates need to be installed. The CA
+ certificate needs to be installed, too.
+
+
+
+
+ Create the client certificate and export it together with the
+ respective CA certificate.
+
+
+
+
+ Create the following directories on the client:
+
+ &prompt.sudo;mkdir -p /etc/pki/CA/ /etc/pki/libvirt/private/
+
+ Install the certificates as follows:
+
+ &prompt.sudo;/etc/pki/CA/cacert.pem
+ &prompt.sudo;/etc/pki/libvirt/clientcert.pem
+ &prompt.sudo;/etc/pki/libvirt/private/clientkey.pem
+
+ Restrict access to certificates
+
+ Make sure to restrict access to certificates, as explained in
+ .
+
+
+
+
+
+ Test the client/server setup by issuing the following command.
+ Replace &wsIVname; with the name of
+ your &vmhost;. Specify the same fully qualified host name as used
+ when creating the server certificate.
+
+ #&qemu;/&kvm;
+ virsh -c qemu+tls://&wsIVname;/system list --all
+
+ If your setup is correct, you can see a list of all &vmguest;s
+ registered with &libvirt; on the &vmhost;.
+
+
+
+
+
+ Enabling VNC for TLS/SSL connections
+
+ Currently, VNC communication over TLS is only supported by a few
+ tools. Common VNC viewers such as tightvnc or
+ tigervnc do not support TLS/SSL.
+
+
+ VNC over TLS/SSL: &vmhost; configuration
+
+ To access the graphical console via VNC over TLS/SSL, you need to
+ configure the &vmhost; as follows:
+
+
+
+
+ Open ports for the service
+ VNC in your firewall.
+
+
+
+
+ Create a directory /etc/pki/libvirt-vnc
+ and link the certificates into this directory as follows:
+
+ &prompt.sudo;mkdir -p /etc/pki/libvirt-vnc && cd /etc/pki/libvirt-vnc
+ &prompt.sudo;ln -s /etc/pki/CA/cacert.pem ca-cert.pem
+ &prompt.sudo;ln -s /etc/pki/libvirt/servercert.pem server-cert.pem
+ &prompt.sudo;ln -s /etc/pki/libvirt/private/serverkey.pem server-key.pem
+
+
+
+ Edit /etc/libvirt/qemu.conf and set the
+ following parameters:
+
+ vnc_listen = "0.0.0.0"
+ vnc_tls = 1
+ vnc_tls_x509_verify = 1
+
+
+
+ Restart the &libvirtd;:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+ &vmguest;s need to be restarted
+
+ The VNC TLS setting is only set when starting a &vmguest;.
+ Therefore, you need to restart all machines that have been
+ running before making the configuration change.
+
+
+
+
+
+
+ VNC over TLS/SSL: client configuration
+
+ The only action needed on the client side is to place the x509
+ client certificates in a location recognized by the client of
+ choice. However, &vmm; and virt-viewer expect
+ the certificates in a different location. &vmm; can either read
+ from a system-wide location applying to all users, or from a
+ per-user location.
+
+
+
+ &vmm; (virt-manager)
+
+
+ To connect to the remote host, &vmm; requires the setup
+ explained in
+ . To
+ be able to connect via VNC, the client certificates also need
+ to be placed in the following locations:
+
+
+
+ System-wide location
+
+
+
+ /etc/pki/CA/cacert.pem
+
+
+ /etc/pki/libvirt-vnc/clientcert.pem
+
+
+ /etc/pki/libvirt-vnc/private/clientkey.pem
+
+
+
+
+
+ Per-user location
+
+
+
+ /etc/pki/CA/cacert.pem
+
+
+ ~/.pki/libvirt-vnc/clientcert.pem
+
+
+ ~/.pki/libvirt-vnc/private/clientkey.pem
+
+
+
+
+
+
+
+
+ virt-viewer
+
+
+ virt-viewer only accepts certificates from
+ a system-wide location:
+
+
+
+ /etc/pki/CA/cacert.pem
+
+
+ /etc/pki/libvirt-vnc/clientcert.pem
+
+
+ /etc/pki/libvirt-vnc/private/clientkey.pem
+
+
+
+
+
+
+ Restrict access to certificates
+
+ Make sure to restrict access to certificates, as explained in
+ .
+
+
+
+
+
+ Restricting access (security considerations)
+
+ Each x509 certificate consists of two pieces: the public certificate
+ and a private key. A client can only authenticate using both pieces.
+ Therefore, any user that has read access to the client certificate
+ and its private key can access your &vmhost;. On the other hand, an
+ arbitrary machine equipped with the full server certificate can
+ pretend to be the &vmhost;. Since this is not desirable, access to at
+ least the private key files needs to be restricted as much as
+ possible. The easiest way to control access to a key file is to use
+ access permissions.
+
+
+
+ Server certificates
+
+
+ Server certificates need to be readable for &qemu; processes.
+ On &productname;, &qemu; processes started from &libvirt; tools
+ are owned by &rootuser;, so it is sufficient if the &rootuser;
+ can read the certificates:
+
+ &prompt.user;chmod 700 /etc/pki/libvirt/private/
+ &prompt.user;chmod 600 /etc/pki/libvirt/private/serverkey.pem
+
+ If you change the ownership for &qemu; processes in
+ /etc/libvirt/qemu.conf, you also need to
+ adjust the ownership of the key file.
+
+
+
+
+ System-wide client certificates
+
+
+ To control access to a key file that is available system-wide,
+ restrict read access to a certain group, so that only members
+ of that group can read the key file. In the following example,
+ a group libvirt is
+ created, and group ownership of the
+ clientkey.pem file and its parent
+ directory is set to
+ libvirt. Afterward,
+ the access permissions are restricted to owner and group.
+ Finally, the user &exampleuser; is added to the group
+ libvirt, and thus
+ can access the key file.
+
+ CERTPATH="/etc/pki/libvirt/"
+ # create group libvirt
+ groupadd libvirt
+ # change ownership to user root and group libvirt
+ chown root.libvirt $CERTPATH/private $CERTPATH/clientkey.pem
+ # restrict permissions
+ chmod 750 $CERTPATH/private
+ chmod 640 $CERTPATH/private/clientkey.pem
+ # add user tux to group libvirt
+ usermod --append --groups libvirt tux
+
+
+
+ Per-user certificates
+
+
+ User-specific client certificates for accessing the graphical
+ console of a &vmguest; via VNC need to be placed in the user's
+ home directory in ~/.pki. Contrary to SSH,
+ for example, the VNC viewer using these certificates does not
+ check the access permissions of the private key file.
+ Therefore, it is solely the user's responsibility to make sure
+ the key file is not readable by others.
+
+
+
+
+
+ Restricting access from the server side
+
+ By default, every client that is equipped with appropriate client
+ certificates may connect to a &vmhost; accepting TLS connections.
+ Therefore, it is possible to use additional server-side
+ authentication with SASL as described in
+ .
+
+
+ It is also possible to restrict access with an allowlist of DNs
+ (distinguished names), so only clients with a certificate matching
+ a DN from the list can connect.
+
+
+ Add a list of allowed DNs to tls_allowed_dn_list
+ in /etc/libvirt/libvirtd.conf. This list may
+ contain wildcards. Do not specify an empty list, since that would
+ result in refusing all connections.
+
+ tls_allowed_dn_list = [
+ "C=US,L=Provo,O=SUSE Linux Products GmbH,OU=*,CN=&wsIIname;,EMAIL=*",
+ "C=DE,L=Nuremberg,O=SUSE Linux Products GmbH,OU=Documentation,CN=*"]
+
+ Get the distinguished name of a certificate with the following
+ command:
+
+ &prompt.user;certtool -i --infile /etc/pki/libvirt/clientcert.pem | grep "Subject:"
+
+ Restart &libvirtd; after having changed the configuration:
+
+ &prompt.sudo;systemctl restart libvirtd
+
+
+
+ Central user authentication with SASL for TLS sockets
+
+
+ A direct user authentication via TLS is not possible—this is
+ handled indirectly on each client via the read permissions for the
+ certificates as explained in
+ . However,
+ if a central, server-based user authentication is needed, &libvirt;
+ also allows using SASL (Simple Authentication and Security Layer) on
+ top of TLS for direct user authentication. See
+ for
+ configuration details.
+
+
+
+ Troubleshooting
+
+
+ &vmm;/virsh cannot connect to server
+
+ Check the following in the given order:
+
+
+
+ Is it a firewall issue (TCP port 16514 needs to be open on the
+ server)?
+
+
+ Is the client certificate (certificate and key) readable by the
+ user that has started &vmm;/virsh?
+
+
+ Has the same fully qualified host name as in the server
+ certificate been specified with the connection?
+
+
+ Is TLS enabled on the server (listen_tls =
+ 1)?
+
+
+ Has &libvirtd; been restarted on the server?
+
+
+
+
+ VNC connection fails
+
+ Ensure that you can connect to the remote server using &vmm;. If
+ so, check whether the virtual machine on the server has been
+ started with TLS support. The virtual machine's name in the
+ following example is sles.
+
+ &prompt.user;ps ax | grep qemu | grep "\-name sles" | awk -F" -vnc " '{ print FS $2 }'
+
+ If the output does not begin with a string similar to the
+ following, the machine has not been started with TLS support and
+ must be restarted.
+
+ -vnc 0.0.0.0:0,tls,x509verify=/etc/pki/libvirt
+
+
+
+
+
diff --git a/references/libvirt_guest_installation.xml b/references/libvirt_guest_installation.xml
new file mode 100644
index 000000000..6b0901fc8
--- /dev/null
+++ b/references/libvirt_guest_installation.xml
@@ -0,0 +1,707 @@
+
+
+ %entities;
+]>
+
+
+ Guest installation
+
+
+
+ yes
+
+
+
+ A &vmguest; consists of an image containing an operating system and data files and a
+ configuration file describing the &vmguest;'s virtual hardware resources. &vmguest;s are hosted
+ on and controlled by the &vmhost;. This section provides generalized instructions for
+ installing a &vmguest;. For a list of supported &vmguest;s, refer to the
+ section Supported guest operating systems in the article Virtualization
+ Limits and Support.
+
+
+ Virtual machines have few if any requirements above those required to run the operating system.
+ If the operating system has not been optimized for the virtual machine host environment, it can
+ only run on virtualization computer hardware, in full
+ virtualization mode, and requires specific device drivers to be loaded. The hardware that is
+ presented to the &vmguest; depends on the configuration of the host.
+
+
+ You should be aware of any licensing issues related to running a single licensed copy of an
+ operating system on multiple virtual machines. Consult the operating system license agreement
+ for more information.
+
+
+ GUI-based guest installation
+
+
+ Changing default options for new virtual machines
+
+ You can change default values that are applied when creating new virtual machines. For
+ example, to set UEFI as the default firmware type for new virtual machines, select
+ EditPreferences from &vmm;'s
+ main menu, click New VM and set UEFI as the firmware
+ default.
+
+
+ Specifying default options for new VMs
+
+
+
+
+
+
+
+
+
+
+
+
+ The New VM wizard helps you through the steps required to create a virtual
+ machine and install its operating system. To start it, open the &vmm; and select
+ FileNew Virtual Machine.
+
+
+
+
+
+ Start the New VM with &vmm;.
+
+
+
+
+ Choose an installation source—either a locally available media or a network
+ installation source. To set up your &vmguest; from an existing image, choose
+ import existing disk image.
+
+
+
+
+ Depending on your choice in the previous step, you need to provide the following data:
+
+
+
+ Local install media (ISO image or CDROM)
+
+
+ Specify the path on the &vmhost; to an ISO image containing the installation data.
+ If it is available as a volume in a libvirt storage pool, you can also select it
+ using Browse.
+
+
+
+ Alternatively, choose a physical CD-ROM or DVD inserted in the optical drive of the
+ &vmhost;.
+
+
+
+
+ Network install (HTTP, HTTPS or FTP)
+
+
+ Provide the URL pointing to the installation source. Valid URL
+ prefixes are, for example, ftp://, http://
+ and https://.
+
+
+ Under URL Options, provide a path to an auto-installation file
+ (&ay; or Kickstart, for example) and kernel parameters. Having provided a URL, the
+ operating system should be automatically detected correctly. If this is not the
+ case, deselect Automatically Detect Operating System Based on
+ Install-Media and manually select the OS Type and
+ Version.
+
+
+
+
+
+ Import existing disk image
+
+
+ To set up the &vmguest; from an existing image, you need to specify the path on the
+ &vmhost; to the image. If it is available as a volume in a libvirt storage pool,
+ you can also select it using Browse.
+
+
+
+
+
+ Manual install
+
+
+ This installation method is suitable to create a virtual machine, manually
+ configure its components and install its OS later. To adjust the VM to a specific
+ product version, start typing its name, for example,
+ sles—and select the desired version when a match appears.
+
+
+
+
+
+
+
+ Choose the memory size and number of CPUs for the new virtual machine.
+
+
+
+
+ This step is omitted when Import an Existing Image is chosen in the
+ first step.
+
+
+ Set up a virtual hard disk for the &vmguest;. Either create a new disk image or choose an
+ existing one from a storage pool.
+
+ If you choose to create a disk, a
+ qcow2 image is created and stored under
+ /var/lib/libvirt/images by default.
+
+
+ Setting up a disk is optional. If you are running a live system directly from CD or DVD,
+ for example, you can omit this step by deactivating Enable Storage for this
+ Virtual Machine.
+
+
+
+
+ On the last screen of the wizard, specify the name for the virtual machine. To be offered
+ the possibility to review and make changes to the virtualized hardware selection,
+ activate Customize configuration before install. Specify the network
+ device under Network Selection. When using Bridge
+ device, the first bridge found on the host is pre-filled. To use a different
+ bridge, manually update the text box with its name.
+
+
+ Click Finish.
+
+
+
+
+ If you kept the defaults in the previous step, the installation starts. If you selected
+ Customize configuration before install, a &vmguest; configuration
+ dialog opens.
+
+
+
+ When you are done configuring, click Begin Installation.
+
+
+
+
+
+ Passing key combinations to virtual machines
+
+ The installation starts in a &vmm; console window. Certain key combinations, such as
+ F1
+ , are recognized by the &vmhost; but are not passed to the virtual machine. To
+ bypass the &vmhost;, &vmm; provides the sticky key functionality. Pressing
+ , , or three
+ times makes the key sticky, then you can press the remaining keys to pass the combination
+ to the virtual machine.
+
+
+ For example, to pass
+ F2 to a Linux virtual machine, press
+ three times, then press
+ F2. You can also press three times,
+ then press F2.
+
+
+ The sticky key functionality is available in the &vmm; during and after installing a
+ &vmguest;.
+
+
+
+
+ Configuring the virtual machine for PXE boot
+
+ PXE boot enables your virtual machine to boot from the installation media via the network,
+ instead of from a physical medium or an installation disk image.
+
+
+ To let your VM boot from a PXE server, follow these steps:
+
+
+
+
+ Start the installation wizard as described in .
+
+
+
+
+ Select the Manual Install method.
+
+
+
+
+ Proceed to the last step of the wizard and activate Customize configuration
+ before install. Confirm with Finish.
+
+
+
+
+ On the Customize screen, select Boot Options.
+
+
+
+
+ Inspect Boot device order and select Enable boot
+ menu.
+
+
+
+
+ To retain VirtIO Disk as the default boot option, confirm with
+ Apply.
+
+
+
+
+ To force the virtual machine to use PXE as the default boot option:
+
+
+
+
+ Select the NIC device in the boot menu configuration.
+
+
+
+
+ Move it to the top using the arrow signs on the right.
+
+
+
+
+ Confirm with Apply.
+
+
+
+
+
+
+
+
+ Start the installation by clicking Begin Installation. Now press
+ for boot menu and choose 1. iPXE. If a
+ PXE server is properly configured, the PXE menu screen appears.
+
+
+
+
+
+
+ Installing from the command line with virt-install
+
+
+ virt-install is a command-line tool that helps you create new virtual
+ machines using the &libvirt; library. It is useful if you cannot use the graphical user
+ interface, or need to automate the process of creating virtual machines.
+
+
+
+ virt-install is a complex script with a lot of command line switches. The
+ following are required. For more information, see the man page of
+ virt-install (1).
+
+
+
+
+ General options
+
+
+
+
+ : Specify the name
+ of the new virtual machine. The name must be unique across all guests known to the
+ hypervisor on the same connection. It is used to create and name the guest’s
+ configuration file and you can access the guest with this name from
+ virsh. Alphanumeric and _-.:+ characters are
+ allowed.
+
+
+
+
+ : Specify the
+ amount of memory to allocate for the new virtual machine in megabytes.
+
+
+
+
+ : Specify the
+ number of virtual CPUs. For best performance, the number of virtual processors
+ should be less than or equal to the number of physical processors.
+
+
+
+
+
+
+ Virtualization type
+
+
+
+
+ : set up a paravirtualized guest. This is the default if
+ the &vmhost; supports paravirtualization and full virtualization.
+
+
+
+
+ : set up a fully virtualized guest.
+
+
+
+
+ : Specify the
+ hypervisor. The supported value is kvm.
+
+
+
+
+
+
+ Guest storage
+
+
+ Specify one of , or
+ the type of the storage for the new virtual machine. For
+ example, creates a 10 GB disk in the default image
+ location for the hypervisor and uses it for the &vmguest;. specifies the directory on
+ the &vmhost; to be exported to the guest. And sets up a
+ &vmguest; without a local storage (good for Live CDs).
+
+
+
+
+ Installation method
+
+
+ Specify the installation method using one of ,
+ , , , or
+ .
+
+
+
+
+ Accessing the installation
+
+
+ Use the option to specify
+ how to access the installation. &productname; supports the values
+ vnc or none.
+
+
+ If using VNC, virt-install tries to launch
+ virt-viewer. If it is not installed or cannot be run, connect to the
+ &vmguest; manually with your preferred viewer. To explicitly prevent
+ virt-install from launching the viewer, use
+ . To define a password for accessing the VNC session,
+ use the following syntax: .
+
+
+ In case you are using , you can access the &vmguest;
+ through operating system supported services, such as SSH or VNC. Refer to the operating
+ system installation manual on how to set up these services in the installation system.
+
+
+
+
+ Passing kernel and initrd files
+
+
+ It is possible to directly specify the Kernel and Initrd of the installer, for example,
+ from a network source.
+
+
+ To pass additional boot parameters, use the option. This
+ can be used to specify a network configuration.
+
+
+ Loading kernel and initrd from HTTP server
+&prompt.root;virt-install --location \
+"http://download.opensuse.org/pub/opensuse/distribution/leap/15.0/repo/oss" \
+--extra-args="textmode=1" --name "Leap15" --memory 2048 --virt-type kvm \
+--connect qemu:///system --disk size=10 --graphics vnc --network \
+network=vnet_nated
+&prompt.root;virt-install --location "http://example.tld/REPOSITORY/DVD1/" \
+--extra-args="textmode=1" --name "SLES15" --memory 2048 --virt-type kvm\
+--connect qemu:///system --disk size=10 --graphics vnc \
+--network network=vnet_nated
+
+
+
+
+ Enabling the console
+
+
+ By default, the console is not enabled for new virtual machines installed using
+ virt-install. To enable it, use as in the following example:
+
+&prompt.user;virt-install --virt-type kvm --name sles12 --memory 1024 \
+ --disk /var/lib/libvirt/images/disk1.qcow2 --os-variant sles12
+ --extra-args="console=ttyS0 textmode=1" --graphics none
+
+ After the installation finishes, the /etc/default/grub file in the
+ VM image is updated with the option on the
+ GRUB_CMDLINE_LINUX_DEFAULT line.
+
+
+
+
+ Using &uefisecboot;
+
+
+
+ &suse; supports &uefisecboot; on &x86-64; &kvm; guests only.
+
+
+
+ By default, new virtual machines installed using virt-install are
+ configured with a legacy BIOS. They can be configured to use UEFI with . A firmware that supports &uefisecboot; and has Microsoft keys
+ enrolled will be selected. If secure boot is undesirable, the option
+ can be used to select a UEFI firmware without secure boot support.
+
+
+ It is also possible to explicitly specify a UEFI firmware image. See
+ for advanced information and
+ examples on using UEFI with virtual machines.
+
+
+
+
+
+
+ Example of a virt-install command line
+
+ The following command line example creates a new &productnameshort; 15 SP2 virtual machine with a virtio
+ accelerated disk and network card. It creates a new 10 GB qcow2 disk image as a
+ storage, the source installation media being the host CD-ROM drive. It uses VNC graphics,
+ and it automatically launches the graphical client.
+
+
+
+ &kvm;
+
+&prompt.user;virt-install --connect qemu:///system --virt-type kvm \
+--name sle15sp2 --memory 1024 --disk size=10 --cdrom /dev/cdrom --graphics vnc \
+--os-variant sle15sp2
+
+
+
+
+
+
+ Advanced guest installation scenarios
+
+
+ This section provides instructions for operations exceeding the scope of a normal
+ installation, such as manually configuring UEFI firmware, memory ballooning and installing
+ add-on products.
+
+
+
+ Advanced UEFI configuration
+
+ The UEFI firmware used by virtual machines is provided by OVMF
+ (Open Virtual Machine Firmware). The
+ qemu-ovmf-x86_64 package provides firmware for &x86-64; &vmguest;s.
+ Firmware for &aarch64; &vmguest;s is provided by the qemu-uefi-aarch64
+ package. Both packages include several firmware variants, each supporting a different set of
+ features and capabilities. The packages also include JSON firmware descriptor files, which
+ describe the features and capabilities of each variant.
+
+
+ &libvirt; supports two methods of selecting virtual machine UEFI firmware: automatic and
+ manual. With automatic selection, &libvirt; will select a firmware based on an optional set
+ of features specified by the user. If no explicit features are specified, &libvirt; will
+ select a firmware with secure boot enabled and Microsoft keys enrolled. When using manual
+ selection, the full path of the firmware and any optional settings must be explicitly
+ specified. Users can reference the JSON descriptor files to find a firmware that satisfies
+ their requirements.
+
+
+
+ The directory /usr/share/qemu/firmware contains all the JSON files
+ used by &libvirt;. This file gives you detailed information about the firmware,
+ including the capabilities of the features.
+
+
+
+ When using virt-install, automatic firmware selection is enabled by
+ specifying the firmware=efi parameter to the boot
+ option, for example, . The selection process can be
+ influenced by requesting the presence or absence of firmware features. The following
+ example illustrates automatic firmware selection with &uefisecboot; disabled.
+
+&prompt.user;virt-install --connect qemu:///system --virt-type kvm \
+--name sle15sp5 --memory 1024 --disk size=10 --cdrom /dev/cdrom --graphics vnc \
+--boot firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no \
+--os-variant sle15sp5
+
+
+ To ensure persistent &vmguest;s use the same firmware and variable store throughout their
+ lifetime, &libvirt; will record automatically selected firmware in the &vmguest; XML
+ configuration. Automatic firmware selection is a one-time activity. Once firmware has
+ been selected, it will only change if the &vmguest; administrator explicitly does so
+ using the manual firmware selection method.
+
+
+
+ The loader and nvram parameters are used for
+ manual firmware selection. loader is required, and
+ nvram defines an optional UEFI variable store. The following example
+ illustrates manual firmware selection with secure boot enabled.
+
+&prompt.user;virt-install --connect qemu:///system --virt-type kvm \
+--name sle15sp5 --memory 1024 --disk size=10 --cdrom /dev/cdrom --graphics vnc \
+--boot loader=/usr/share/qemu/ovmf-x86_64-smm-code.bin,loader.readonly=yes,loader.type=pflash,loader.secure=yes,nvram.template=/usr/share/qemu/ovmf-x86_64-smm-vars.bin \
+--os-variant sle15sp5
+
+
+ &libvirt; cannot modify any characteristics of the UEFI firmware. For example, it cannot
+ disable &uefisecboot; in a firmware that has &uefisecboot; enabled, even when specifying
+ loader.secure=no. &libvirt; will ensure the specified firmware can
+ satisfy any specified features. For example, it will reject a configuration that disables
+ secure boot with loader.secure=no, but specifies a firmware that has
+ &uefisecboot; enabled.
+
+
+
+ The qemu-ovmf-x86_64 package contains several UEFI firmware images. For
+ example, the following subset supports SMM, &uefisecboot;, and has either Microsoft,
+ openSUSE or SUSE UEFI CA keys enrolled:
+
+
+&prompt.root;rpm -ql qemu-ovmf-x86_64
+[...]
+/usr/share/qemu/ovmf-x86_64-smm-ms-code.bin
+/usr/share/qemu/ovmf-x86_64-smm-ms-vars.bin
+/usr/share/qemu/ovmf-x86_64-smm-opensuse-code.bin
+/usr/share/qemu/ovmf-x86_64-smm-opensuse-vars.bin
+/usr/share/qemu/ovmf-x86_64-smm-suse-code.bin
+/usr/share/qemu/ovmf-x86_64-smm-suse-vars.bin
+[...]
+
+
+ For the 32-bit Arm (AArch32) architecture, the package is named qemu-uefi-aarch32:
+
+
+&prompt.root;rpm -ql qemu-uefi-aarch32
+[...]
+/usr/share/qemu/aavmf-aarch32-code.bin
+/usr/share/qemu/aavmf-aarch32-vars.bin
+/usr/share/qemu/firmware
+/usr/share/qemu/firmware/60-aavmf-aarch32.json
+/usr/share/qemu/qemu-uefi-aarch32.bin
+
+
+ The *-code.bin files are the UEFI firmware files. The
+ *-vars.bin files are corresponding variable store images that can be
+ used as a template for a per-VM non-volatile store. &libvirt; copies the specified
+ vars template to a per-VM path under
+ /var/lib/libvirt/qemu/nvram/ when first creating the VM. Files without
+ code or vars in the name can be used as a single UEFI
+ image. They are not as useful, since no UEFI variables persist across power cycles of the
+ VM.
+
+
+ The *-ms*.bin files contain UEFI CA keys as found on real hardware.
+ Therefore, they are configured as the default in &libvirt;. Likewise, the
+ *-suse*.bin files contain preinstalled &suse; keys. There is also a
+ set of files with no preinstalled keys.
+
+
+ For more details on OVMF, see
+ .
+
+
+
+
+ Memory ballooning with Windows guests
+
+ Memory ballooning is a method to change the amount of memory used by &vmguest; at runtime.
+ &kvm; hypervisors provide this method, but it needs to be supported by
+ the guest as well.
+
+
+ While &opensuse; and &productnameshort;-based guests support memory ballooning, Windows guests need the
+ Virtual Machine Driver Pack
+ (VMDP) to provide ballooning. To set the maximum memory greater than the initial
+ memory configured for Windows guests, follow these steps:
+
+
+
+
+ Install the Windows guest with the maximum memory equal or less than the initial value.
+
+
+
+
+ Install the Virtual Machine Driver Pack in the Windows guest to provide required
+ drivers.
+
+
+
+
+ Shut down the Windows guest.
+
+
+
+
+ Reset the maximum memory of the Windows guest to the required value.
+
+
+
+
+ Start the Windows guest again.
+
+
+
+
+
+
+ Including add-on products in the installation
+
+ Certain operating systems, such as &productname;, offer to include add-on products in the
+ installation process. If the add-on product installation source is provided via &scc;, no
+ special &vmguest; configuration is needed. If it is provided via CD/DVD or ISO image, it is
+ necessary to provide the &vmguest; installation system with both the standard installation
+ medium image and the image of the add-on product.
+
+
+ If you are using the GUI-based installation, select Customize Configuration Before
+ Install in the last step of the wizard and add the add-on product ISO image via
+ Add HardwareStorage.
+ Specify the path to the image and set the Device Type to
+ CD-ROM.
+
+
+ If you are installing from the command line, you need to set up the virtual CD/DVD drives
+ with the parameter rather than with . The
+ device that is specified first is used for booting.
+
+
+
+
diff --git a/references/libvirt_host.xml b/references/libvirt_host.xml
new file mode 100644
index 000000000..cf9f89a4b
--- /dev/null
+++ b/references/libvirt_host.xml
@@ -0,0 +1,1844 @@
+
+
+ %entities;
+]>
+
+
+ Preparing the &vmhost;
+
+ yes
+
+
+
+ Before you can install guest virtual machines, you need to prepare the
+ &vmhost; to provide the guests with the resources that they need for their
+ operation. Specifically, you need to configure:
+
+
+
+
+ Networking so that guests can make use of the
+ network connection provided by the host.
+
+
+
+
+ A storage pool reachable from the host so that the
+ guests can store their disk images.
+
+
+
+
+ Configuring networks
+
+
+ There are two common network configurations to provide a &vmguest; with a
+ network connection:
+
+
+
+
+
+ A network bridge. This is the default and
+ recommended way of providing the guests with network connection.
+
+
+
+
+ A virtual network with forwarding enabled.
+
+
+
+
+
+ Network bridge
+
+ The network bridge configuration provides a Layer 2 switch for
+ &vmguest;s, switching Layer 2 Ethernet packets between ports on the
+ bridge based on MAC addresses associated with the ports. This gives the
+ &vmguest; Layer 2 access to the &vmhost;'s network. This configuration
+ is analogous to connecting the &vmguest;'s virtual Ethernet cable into
+ a hub that is shared with the host and other &vmguest;s running on the
+ host. The configuration is often referred to as shared
+ physical device.
+
+
+ The network bridge configuration is the default configuration of
+ &productname; when configured as a &kvm; hypervisor. It is the
+ preferred configuration when you simply want to connect &vmguest;s to
+ the &vmhost;'s LAN.
+
+
+ Managing network bridges from the command line
+
+ This section includes procedures to add or remove network bridges
+ using the command line.
+
+
+ Adding a network bridge
+
+ To add a new network bridge device on &vmhost;, follow these steps:
+
+
+
+
+ Log in as &rootuser; on the &vmhost; where you want to create a
+ new network bridge.
+
+
+
+
+ Choose a name for the new
+ bridge—virbr_test in our
+ example—and run
+
+&prompt.root;ip link add name VIRBR_TEST type bridge
+
+
+
+ Check if the bridge was created on &vmhost;:
+
+&prompt.root;bridge vlan
+[...]
+virbr_test 1 PVID Egress Untagged
+
+
+ virbr_test is present, but is not associated
+ with any physical network interface.
+
+
+
+
+ Bring the network bridge up and add a network interface to the
+ bridge:
+
+
+&prompt.root;ip link set virbr_test up
+&prompt.root;ip link set eth1 master virbr_test
+
+
+ Network interface must be unused
+
+ You can only assign a network interface that is not yet used
+ by another network bridge.
+
+
+
+
+
+ Optionally, enable STP (see
+ Spanning
+ Tree Protocol):
+
+&prompt.root;bridge link set dev virbr_test cost 4
+
+
+
+
+ Deleting a network bridge
+
+ To delete an existing network bridge device on &vmhost; from the
+ command line, follow these steps:
+
+
+
+
+ Log in as &rootuser; on the &vmhost; where you want to delete
+ an existing network bridge.
+
+
+
+
+ List existing network bridges to identify the name of the
+ bridge to remove:
+
+&prompt.root;bridge vlan
+[...]
+virbr_test 1 PVID Egress Untagged
+
+
+
+
+ Delete the bridge:
+
+&prompt.root;ip link delete dev virbr_test
+
+
+
+
+
+ Adding a network bridge with nmcli
+
+ This section includes procedures to add a network bridge with &nm;'s
+ command line tool nmcli.
+
+
+
+
+ List active network connections:
+
+
+&prompt.sudo;nmcli connection show --active
+NAME UUID TYPE DEVICE
+Ethernet connection 1 84ba4c22-0cfe-46b6-87bb-909be6cb1214 ethernet eth0
+
+
+
+
+ Add a new bridge device named br0 and verify
+ its creation:
+
+
+&prompt.sudo;nmcli connection add type bridge ifname br0
+Connection 'bridge-br0' (36e11b95-8d5d-4a8f-9ca3-ff4180eb89f7) \
+successfully added.
+&prompt.sudo;nmcli connection show --active
+NAME UUID TYPE DEVICE
+bridge-br0 36e11b95-8d5d-4a8f-9ca3-ff4180eb89f7 bridge br0
+Ethernet connection 1 84ba4c22-0cfe-46b6-87bb-909be6cb1214 ethernet eth0
+
+
+
+
+ Optionally, you can view the bridge settings:
+
+
+&prompt.sudo;nmcli -f bridge connection show bridge-br0
+bridge.mac-address: --
+bridge.stp: yes
+bridge.priority: 32768
+bridge.forward-delay: 15
+bridge.hello-time: 2
+bridge.max-age: 20
+bridge.ageing-time: 300
+bridge.group-forward-mask: 0
+bridge.multicast-snooping: yes
+bridge.vlan-filtering: no
+bridge.vlan-default-pvid: 1
+bridge.vlans: --
+
+
+
+
+ Link the bridge device to the physical Ethernet device
+ eth0:
+
+&prompt.sudo;nmcli connection add type bridge-slave ifname eth0 master br0
+
+
+
+ Disable the eth0 interface and enable the new
+ bridge:
+
+
+&prompt.sudo;nmcli connection down "Ethernet connection 1"
+&prompt.sudo;nmcli connection up bridge-br0
+Connection successfully activated (master waiting for slaves) \
+(D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/9)
+
+
+
+
+
+ Using VLAN interfaces
+
+ Sometimes it is necessary to create a private connection either
+ between two &vmhost;s or between &vmguest; systems. For example, to
+ migrate a &vmguest; to hosts in a different network segment. Or to
+ create a private bridge that only &vmguest; systems may connect to
+ (even when running on different &vmhost; systems). An easy way to
+ build such connections is to set up VLAN networks.
+
+
+ VLAN interfaces are commonly set up on the &vmhost;. They either
+ interconnect the different &vmhost; systems, or they may be set up as
+ a physical interface to an otherwise virtual-only bridge. It is even
+ possible to create a bridge with a VLAN as a physical interface that
+ has no IP address in the &vmhost;. That way, the guest systems
+ cannot access the host over this network.
+
+
+ It is also possible to use the VLAN interface as a physical interface
+ of a bridge. This makes it possible to connect several &vmhost;-only
+ networks and allows live migration of &vmguest; systems that are
+ connected to such a network.
+
+
+
+
+
+ Virtual networks
+
+ &libvirt;-managed virtual networks are similar to bridged networks, but
+ typically have no Layer 2 connection to the &vmhost;. Connectivity to
+ the &vmhost;'s physical network is accomplished with Layer 3
+ forwarding, which introduces additional packet processing on the
+ &vmhost; as compared to a Layer 2 bridged network. Virtual networks
+ also provide DHCP and DNS services for &vmguest;s. For more information
+ on &libvirt; virtual networks, see the Network XML
+ format documentation at
+ .
+
+
+ A standard &libvirt; installation on &productname; already comes with a
+ predefined virtual network named default. It
+ provides DHCP and DNS services for the network, along with connectivity
+ to the &vmhost;'s physical network using the network address
+ translation (NAT) forwarding mode. Although it is predefined, the
+ default virtual network needs to be explicitly
+ enabled by the administrator. For more information on the forwarding
+ modes supported by &libvirt;, see the
+ Connectivity section of the Network
+ XML format documentation at
+ .
+
+
+ &libvirt;-managed virtual networks can be used to satisfy a wide range
+ of use cases, but are commonly used on &vmhost;s that have a wireless
+ connection or dynamic/sporadic network connectivity, such as laptops.
+ Virtual networks are also useful when the &vmhost;'s network has
+ limited IP addresses, allowing forwarding of packets between the
+ virtual network and the &vmhost;'s network. However, most server use
+ cases are better suited for the network bridge configuration, where
+ &vmguest;s are connected to the &vmhost;'s LAN.
+
+
+ Enabling forwarding mode
+
+ Enabling forwarding mode in a &libvirt; virtual network enables
+ forwarding in the &vmhost; by setting
+ /proc/sys/net/ipv4/ip_forward and
+ /proc/sys/net/ipv6/conf/all/forwarding to 1,
+ which turns the &vmhost; into a router. Restarting the &vmhost;'s
+ network may reset the values and disable forwarding. To avoid this
+ behavior, explicitly enable forwarding in the &vmhost; by editing the
+ /etc/sysctl.conf file and adding:
+
+net.ipv4.ip_forward = 1
+net.ipv6.conf.all.forwarding = 1
+
+
+ Managing virtual networks with &vmm;
+
+ You can define, configure and operate virtual networks with &vmm;.
+
+
+ Defining virtual networks
+
+
+
+ Start &vmm;. In the list of available connections, right-click
+ the name of the connection for which you need to configure the
+ virtual network, and then select Details.
+
+
+
+
+ In the Connection Details window, click the
+ Virtual Networks tab. You can see the list
+ of all virtual networks available for the current connection.
+ On the right, there are details of the selected virtual
+ network.
+
+
+ Connection details
+
+
+
+
+
+
+
+
+
+
+
+
+ To add a new virtual network, click Add.
+
+
+
+
+ Specify a name for the new virtual network.
+
+
+ Create virtual network
+
+
+
+
+
+
+
+
+
+
+
+
+ Specify the networking mode. For the NAT and
+ Routed types, you can specify to which
+ device to forward network communications. While
+ NAT (network address translation) remaps the
+ virtual network address space and allows sharing a single IP
+ address, Routed forwards packets from the
+ virtual network to the &vmhost;'s physical network with no
+ translation.
+
+
+
+
+ If you need IPv4 networking, activate Enable
+ IPv4 and specify the IPv4 network address. If you
+ need a DHCP server, activate Enable DHCPv4
+ and specify the assignable IP address range.
+
+
+
+
+ If you need IPv6 networking, activate Enable
+ IPv6 and specify the IPv6 network address. If you
+ need a DHCP server, activate Enable DHCPv6
+ and specify the assignable IP address range.
+
+
+
+
+ To specify a different domain name than the name of the virtual
+ network, select Custom under DNS
+ domain name and enter it here.
+
+
+
+
+ Click Finish to create the new virtual
+ network. On the &vmhost;, a new virtual network bridge
+ virbrX is
+ available, which corresponds to the newly created virtual
+ network. You can check with bridge link.
+ &libvirt; automatically adds iptables rules to allow traffic
+ to/from guests attached to the new
+ virbrX device.
+
+
+
+
+
+ Starting virtual networks
+
+ To start a virtual network that is temporarily stopped, follow
+ these steps:
+
+
+
+
+ Start &vmm;. In the list of available connections, right-click
+ the name of the connection for which you need to configure the
+ virtual network, and then select Details.
+
+
+
+
+ In the Connection Details window, click the
+ Virtual Networks tab. You can see the list
+ of all virtual networks available for the current connection.
+
+
+
+
+ To start the virtual network, click Start.
+
+
+
+
+
+ Stopping virtual networks
+
+ To stop an active virtual network, follow these steps:
+
+
+
+
+ Start &vmm;. In the list of available connections, right-click
+ the name of the connection for which you need to configure the
+ virtual network, and then select Details.
+
+
+
+
+ In the Connection Details window, click the
+ Virtual Networks tab. You can see the list
+ of all virtual networks available for the current connection.
+
+
+
+
+ Select the virtual network to be stopped, then click
+ Stop.
+
+
+
+
+
+ Deleting virtual networks
+
+ To delete a virtual network from &vmhost;, follow these steps:
+
+
+
+
+ Start &vmm;. In the list of available connections, right-click
+ the name of the connection for which you need to configure the
+ virtual network, and then select Details.
+
+
+
+
+ In the Connection Details window, click the
+ Virtual Networks tab. You can see the list
+ of all virtual networks available for the current connection.
+
+
+
+
+ Select the virtual network to be deleted, then click
+ Delete.
+
+
+
+
+
+
+ Obtaining IP addresses with nsswitch for NAT networks (in KVM)
+
+
+
+ On &vmhost;, install libvirt-nss, which provides NSS support
+ for libvirt:
+
+&prompt.sudo;zypper in libvirt-nss
+
+
+
+ Add libvirt to
+ /etc/nsswitch.conf:
+
+...
+hosts: files libvirt mdns_minimal [NOTFOUND=return] dns
+...
+
+
+
+ If NSCD is running, restart it:
+
+&prompt.sudo;systemctl restart nscd
+
+
+
+ Now you can reach the guest system by name from the host.
+
+
+ The NSS module has limited functionality. It reads
+ /var/lib/libvirt/dnsmasq/*.status files to
+ find the host name and corresponding IP addresses in a JSON record
+ describing each lease provided by dnsmasq. Host
+ name translation can only be done on those &vmhost;s using a
+ libvirt-managed bridged network backed by
+ dnsmasq.
+
+
+
+
+ Managing virtual networks with virsh
+
+ You can manage &libvirt;-provided virtual networks with the
+ virsh command line tool. To view all network
+ related virsh commands, run
+
+&prompt.sudo;virsh help network
+Networking (help keyword 'network'):
+ net-autostart autostart a network
+ net-create create a network from an XML file
+ net-define define (but don't start) a network from an XML file
+ net-destroy destroy (stop) a network
+ net-dumpxml network information in XML
+ net-edit edit XML configuration for a network
+ net-event Network Events
+ net-info network information
+ net-list list networks
+ net-name convert a network UUID to network name
+ net-start start a (previously defined) inactive network
+ net-undefine undefine an inactive network
+ net-update update parts of an existing network's configuration
+ net-uuid convert a network name to network UUID
+
+ To view brief help information for a specific
+ virsh command, run virsh help
+ VIRSH_COMMAND:
+
+&prompt.sudo;virsh help net-create
+ NAME
+ net-create - create a network from an XML file
+
+ SYNOPSIS
+ net-create <file>
+
+ DESCRIPTION
+ Create a network.
+
+ OPTIONS
+ [--file] <string> file containing an XML network description
+
+ Creating a network
+
+
+ To create a new running virtual network, run
+
+&prompt.sudo;virsh net-create VNET_DEFINITION.xml
+
+ The VNET_DEFINITION.xml XML file
+ includes the definition of the virtual network that &libvirt;
+ accepts.
+
+
+ To define a new virtual network without activating it, run
+
+&prompt.sudo;virsh net-define VNET_DEFINITION.xml
+
+ The following examples illustrate definitions of different types of
+ virtual networks.
+
+
+ NAT-based network
+
+ The following configuration allows &vmguest;s outgoing
+ connectivity if it is available on the &vmhost;. Without &vmhost;
+ networking, it allows guests to talk directly to each other.
+
+
+<network>
+<name>vnet_nated</name>
+<bridge name="virbr1"/>
+ <forward mode="nat"/>
+ <ip address="192.168.122.1" netmask="255.255.255.0">
+ <dhcp>
+ <range start="192.168.122.2" end="192.168.122.254"/>
+ <host mac="52:54:00:c7:92:da" name="host1.testing.com" \
+ ip="192.168.1.101"/>
+ <host mac="52:54:00:c7:92:db" name="host2.testing.com" \
+ ip="192.168.1.102"/>
+ <host mac="52:54:00:c7:92:dc" name="host3.testing.com" \
+ ip="192.168.1.103"/>
+ </dhcp>
+ </ip>
+</network>
+
+
+
+
+ The name of the new virtual network.
+
+
+
+
+ The name of the bridge device used to construct the virtual
+ network. When defining a new network with a <forward>
+ mode of "nat" or
+ "route" (or an isolated network with no
+ <forward> element), &libvirt; automatically generates a
+ unique name for the bridge device if none is given.
+
+
+
+
+ Inclusion of the <forward> element indicates that the
+ virtual network is connected to the physical LAN. The
+ mode attribute specifies the forwarding
+ method. The most common modes are "nat"
+ (Network Address Translation, the default),
+ "route" (direct forwarding to the physical
+ network, no address translation), and
+ "bridge" (network bridge configured
+ outside of &libvirt;). If the <forward> element is not
+ specified, the virtual network is isolated from other
+ networks. For a complete list of forwarding modes, see
+ .
+
+
+
+
+ The IP address and netmask for the network bridge.
+
+
+
+
+ Enable DHCP server for the virtual network, offering IP
+ addresses ranging from the specified start
+ and end attributes.
+
+
+
+
+ The optional <host> elements specify hosts that are
+ given names and predefined IP addresses by the built-in DHCP
+ server. Any IPv4 host element must specify the following: the
+ MAC address of the host to be assigned a given name, the IP
+ to be assigned to that host, and the name to be given to that
+ host by the DHCP server. An IPv6 host element differs
+ slightly from that for IPv4: there is no
+ mac attribute since a MAC address has no
+ defined meaning in IPv6. Instead, the name
+ attribute is used to identify the host to be assigned the
+ IPv6 address. For DHCPv6, the name is the
+ plain name of the client host sent by the client to the
+ server. This method of assigning a specific IP address can
+ also be used instead of the mac attribute
+ for IPv4.
+
+
+
+
+
+ Routed network
+
+ The following configuration routes traffic from the virtual
+ network to the LAN without applying any NAT. The IP address range
+ must be preconfigured in the routing tables of the router on the
+ &vmhost; network.
+
+
+<network>
+ <name>vnet_routed</name>
+ <bridge name="virbr1"/>
+ <forward mode="route" dev="eth1"/>
+ <ip address="192.168.122.1" netmask="255.255.255.0">
+ <dhcp>
+ <range start="192.168.122.2" end="192.168.122.254"/>
+ </dhcp>
+ </ip>
+</network>
+
+
+
+
+ The guest traffic may only go out via the
+ eth1 network device on the &vmhost;.
+
+
+
+
+
+ Isolated network
+
+ This configuration provides an isolated private network. The
+ guests can talk to each other, and to &vmhost;, but cannot reach
+ any other machines on the LAN, as the <forward> element is
+ missing in the XML description.
+
+<network>
+ <name>vnet_isolated</name>
+ <bridge name="virbr3"/>
+ <ip address="192.168.152.1" netmask="255.255.255.0">
+ <dhcp>
+ <range start="192.168.152.2" end="192.168.152.254"/>
+ </dhcp>
+ </ip>
+ </network>
+
+
+
+ Using an existing bridge on &vmhost;
+
+ This configuration shows how to use an existing &vmhost;'s
+ network bridge br0. &vmguest;s are directly
+ connected to the physical network. Their IP addresses are all on
+ the subnet of the physical network, and there are no restrictions
+ on incoming or outgoing connections.
+
+<network>
+ <name>host-bridge</name>
+ <forward mode="bridge"/>
+ <bridge name="br0"/>
+</network>
+
+
+
+
+ Listing networks
+
+ To list all virtual networks available to &libvirt;, run:
+
+&prompt.sudo;virsh net-list --all
+
+ Name State Autostart Persistent
+----------------------------------------------------------
+ crowbar active yes yes
+ vnet_nated active yes yes
+ vnet_routed active yes yes
+ vnet_isolated inactive yes yes
+
+
+ To list available domains, run:
+
+&prompt.sudo;virsh list
+ Id Name State
+----------------------------------------------------
+ 1 nated_sles12sp3 running
+ ...
+
+ To get a list of interfaces of a running domain, run
+ , or
+ optionally specify the interface to limit the output to this
+ interface. By default, it additionally outputs their IP and MAC
+ addresses:
+
+&prompt.sudo;virsh domifaddr nated_sles12sp3 --interface vnet0 --source lease
+ Name MAC address Protocol Address
+-------------------------------------------------------------------------------
+ vnet0 52:54:00:9e:0d:2b ipv6 fd00:dead:beef:55::140/64
+ - - ipv4 192.168.100.168/24
+
+ To print brief information of all virtual interfaces associated
+ with the specified domain, run:
+
+&prompt.sudo;virsh domiflist nated_sles12sp3
+Interface Type Source Model MAC
+---------------------------------------------------------
+vnet0 network vnet_nated virtio 52:54:00:9e:0d:2b
+
+
+ Getting details about a network
+
+ To get detailed information about a network, run:
+
+&prompt.sudo;virsh net-info vnet_routed
+Name: vnet_routed
+UUID: 756b48ff-d0c6-4c0a-804c-86c4c832a498
+Active: yes
+Persistent: yes
+Autostart: yes
+Bridge: virbr5
+
+
+ Starting a network
+
+ To start an inactive network that was already defined, find its
+ name (or unique identifier, UUID) with:
+
+&prompt.sudo;virsh net-list --inactive
+ Name State Autostart Persistent
+----------------------------------------------------------
+ vnet_isolated inactive yes yes
+
+ Then run:
+
+&prompt.sudo;virsh net-start vnet_isolated
+Network vnet_isolated started
+
+
+ Stopping a network
+
+ To stop an active network, find its name (or unique identifier,
+ UUID) with:
+
+&prompt.sudo;virsh net-list
+ Name State Autostart Persistent
+----------------------------------------------------------
+ vnet_isolated active yes yes
+
+ Then run:
+
+&prompt.sudo;virsh net-destroy vnet_isolated
+Network vnet_isolated destroyed
+
+
+ Removing a network
+
+ To remove the definition of an inactive network from &vmhost;
+ permanently, run:
+
+&prompt.sudo;virsh net-undefine vnet_isolated
+Network vnet_isolated has been undefined
+
+
+
+
+
+ Configuring a storage pool
+
+
+ When managing a &vmguest; on the &vmhost; itself, you can access the
+ complete file system of the &vmhost; to attach or create virtual hard
+ disks or to attach existing images to the &vmguest;. However, this is not
+ possible when managing &vmguest;s from a remote host. For this reason,
+ &libvirt; supports so-called Storage Pools, which can be
+ accessed from remote machines.
+
+
+
+ CD/DVD ISO images
+
+ To be able to access CD/DVD ISO images on the &vmhost; from remote
+ clients, they also need to be placed in a storage pool.
+
+
+
+
+ &libvirt; knows two different types of storage: volumes and pools.
+
+
+
+
+ Storage volume
+
+
+ A storage volume is a storage device that can be assigned to a
+ guest—a virtual disk or a CD/DVD/floppy image. Physically, it
+ can be a block device, for example, a partition or a logical
+ volume, or a file on the &vmhost;.
+
+
+
+
+ Storage pool
+
+
+ A storage pool is a storage resource on the &vmhost; that can be
+ used for storing volumes, similar to network storage for a desktop
+ machine. Physically it can be one of the following types:
+
+
+
+ File system directory (dir)
+
+
+ A directory for hosting image files. The files can be either
+ one of the supported disk formats (raw or qcow2), or ISO
+ images.
+
+
+
+
+ Physical disk device (disk)
+
+
+ Use a complete physical disk as storage. A partition is
+ created for each volume that is added to the pool.
+
+
+
+
+ Pre-formatted block device (fs)
+
+
+ Specify a partition to be used in the same way as a file
+ system directory pool (a directory for hosting image files).
+ The only difference to using a file system directory is that
+ &libvirt; takes care of mounting the device.
+
+
+
+
+ iSCSI target (iscsi)
+
+
+ Set up a pool on an iSCSI target. You must have logged in to
+ the volume once before you can use it with &libvirt;.
+ Volume
+ creation on iSCSI pools is not supported; instead, each
+ existing Logical Unit Number (LUN) represents a volume. Each
+ volume/LUN also needs a valid (empty) partition table or disk
+ label before you can use it. If missing, use
+ fdisk to add it:
+
+&prompt.sudo;fdisk -cu /dev/disk/by-path/ip-&wsIip;:3260-iscsi-iqn.2010-10.com.example:[...]-lun-2
+Device contains neither a valid DOS partition table, nor Sun, SGI
+or OSF disklabel
+Building a new DOS disklabel with disk identifier 0xc15cdc4e.
+Changes will remain in memory only, until you decide to write them.
+After that, of course, the previous content won't be recoverable.
+
+Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
+
+Command (m for help): w
+The partition table has been altered!
+
+Calling ioctl() to re-read partition table.
+Syncing disks.
+
+
+
+ LVM volume group (logical)
+
+
+ Use an LVM volume group as a pool. You can either use a
+ predefined volume group, or create a group by specifying the
+ devices to use. Storage volumes are created as partitions on
+ the volume.
+
+
+ Deleting the LVM-based pool
+
+ When the LVM-based pool is deleted in the Storage Manager,
+ the volume group is deleted as well. This results in a
+ non-recoverable loss of all data stored on the pool.
+
+
+
+
+
+ Multipath devices (mpath)
+
+
+ At the moment, multipathing support is limited to assigning
+ existing devices to the guests. Volume creation or
+ configuring multipathing from within &libvirt; is not
+ supported.
+
+
+
+
+ Network exported directory (netfs)
+
+
+ Specify a network directory to be used in the same way as a
+ file system directory pool (a directory for hosting image
+ files). The only difference to using a file system directory
+ is that &libvirt; takes care of mounting the directory. The
+ supported protocol is NFS.
+
+
+
+
+
+
+ SCSI host adapter (scsi)
+
+
+ Use an SCSI host adapter in almost the same way as an iSCSI
+ target. We recommend using a device name from
+ /dev/disk/by-* rather than
+ /dev/sdX. The
+ latter can change, for example, when adding or removing hard
+ disks. Volume creation on iSCSI pools is not supported.
+ Instead, each existing LUN (Logical Unit Number) represents a
+ volume.
+
+
+
+
+
+
+
+
+
+ Security considerations
+
+ To avoid data loss or data corruption, do not attempt to use resources
+ such as LVM volume groups, iSCSI targets, etc., that are also used to
+ build storage pools on the &vmhost;. There is no need to connect to
+ these resources from the &vmhost; or to mount them on the
+ &vmhost;—&libvirt; takes care of this.
+
+
+ Do not mount partitions on the &vmhost; by label. Under certain
+ circumstances it is possible that a partition is labeled from within a
+ &vmguest; with a name existing on the &vmhost;.
+
+
+
+
+ Managing storage with virsh
+
+ Managing storage from the command line is also possible by using
+ virsh. However, creating storage pools is currently
+ not supported by &suse;. Therefore, this section is restricted to
+ documenting functions such as starting, stopping and deleting pools,
+ and volume management.
+
+
+ A list of all virsh subcommands for managing pools
+ and volumes is available by running virsh help pool
+ and virsh help volume, respectively.
+
+
+ Listing pools and volumes
+
+ List all pools currently active by executing the following command.
+ To also list inactive pools, add the option :
+
+&prompt.user;virsh pool-list --details
+
+ Details about a specific pool can be obtained with the
+ pool-info subcommand:
+
+&prompt.user;virsh pool-info POOL
+
+ By default, volumes can only be listed per pool. To list all volumes
+ from a pool, enter the following command.
+
+&prompt.user;virsh vol-list --details POOL
+
+ At the moment virsh offers no tools to show
+ whether a volume is used by a guest or not. The following procedure
+ describes a way to list volumes from all pools that are currently
+ used by a &vmguest;.
+
+
+ Listing all storage volumes currently used on a &vmhost;
+
+
+ Create an XSLT stylesheet by saving the following content to a
+ file, for example, ~/libvirt/guest_storage_list.xsl:
+
+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output method="text"/>
+ <xsl:template match="text()"/>
+ <xsl:strip-space elements="*"/>
+ <xsl:template match="disk">
+ <xsl:text> </xsl:text>
+ <xsl:value-of select="(source/@file|source/@dev|source/@dir)[1]"/>
+ <xsl:text> </xsl:text>
+ </xsl:template>
+</xsl:stylesheet>
+
+
+
+ Run the following commands in a shell. It is assumed that the
+ guest's XML definitions are all stored in the default location
+ (/etc/libvirt/qemu).
+ xsltproc is provided by the package
+ libxslt.
+
+SSHEET="$HOME/libvirt/guest_storage_list.xsl"
+cd /etc/libvirt/qemu
+for FILE in *.xml; do
+ basename $FILE .xml
+ xsltproc $SSHEET $FILE
+done
+
+
+
+
+ Starting, stopping, and deleting pools
+
+ Use the virsh pool subcommands to start, stop or
+ delete a pool. Replace POOL with the
+ pool's name or its UUID in the following examples:
+
+
+
+ Stopping a pool
+
+&prompt.user;virsh pool-destroy POOL
+
+ A pool's state does not affect attached volumes
+
+ Volumes from a pool attached to &vmguest;s are always
+ available, regardless of the pool's state
+ (Active (started) or
+ Inactive (stopped)). The state of the pool
+ solely affects the ability to attach volumes to a &vmguest;
+ via remote management.
+
+
+
+
+
+ Deleting a pool
+
+&prompt.user;virsh pool-delete POOL
+
+ Deleting storage pools
+
+ See
+
+
+
+
+
+ Starting a pool
+
+&prompt.user;virsh pool-start POOL
+
+
+
+ Enable autostarting a pool
+
+&prompt.user;virsh pool-autostart POOL
+
+ Only pools that are marked to autostart are automatically
+ started if the &vmhost; reboots.
+
+
+
+
+ Disable autostarting a pool
+
+&prompt.user;virsh pool-autostart POOL --disable
+
+
+
+
+
+ Adding volumes to a storage pool
+
+ virsh offers two ways to add volumes to storage
+ pools: either from an XML definition with
+ vol-create and vol-create-from
+ or via command line arguments with vol-create-as.
+ The first two methods are currently not supported by &suse;,
+ therefore this section focuses on the subcommand
+ vol-create-as.
+
+
+ To add a volume to an existing pool, enter the following command:
+
+&prompt.user;virsh vol-create-as POOL NAME 12G --format raw|qcow2 --allocation 4G
+
+
+
+ Name of the pool to which the volume should be added
+
+
+
+
+ Name of the volume
+
+
+
+
+ Size of the image, in this example 12 gigabytes. Use the suffixes
+ k, M, G, T for kilobyte, megabyte, gigabyte, and terabyte,
+ respectively.
+
+
+
+
+ Format of the volume. &suse; currently supports
+ raw and qcow2.
+
+
+
+
+ Optional parameter. By default, virsh creates
+ a sparse image file that grows on demand. Specify the amount of
+ space that should be allocated with this parameter (4 gigabytes
+ in this example). Use the suffixes k, M, G, T for kilobyte,
+ megabyte, gigabyte, and terabyte, respectively.
+
+
+ When not specifying this parameter, a sparse image file with no
+ allocation is generated. To create a non-sparse volume, specify
+ the whole image size with this parameter (would be
+ 12G in this example).
+
+
+
+
+ Cloning existing volumes
+
+ Another way to add volumes to a pool is to clone an existing
+ volume. The new instance is always created in the same pool as the
+ original.
+
+&prompt.user;virsh vol-clone NAME_EXISTING_VOLUME NAME_NEW_VOLUME --pool POOL
+
+
+
+ Name of the existing volume that should be cloned
+
+
+
+
+ Name of the new volume
+
+
+
+
+ Optional parameter. &libvirt; tries to locate the existing
+ volume automatically. If that fails, specify this parameter.
+
+
+
+
+
+
+ Deleting volumes from a storage pool
+
+ To permanently delete a volume from a pool, use the subcommand
+ vol-delete:
+
+&prompt.user;virsh vol-delete NAME --pool POOL
+
+ is optional. &libvirt; tries to locate the
+ volume automatically. If that fails, specify this parameter.
+
+
+ No checks upon volume deletion
+
+ A volume is deleted in any case, regardless of whether it is
+ currently used in an active or inactive &vmguest;. There is no way
+ to recover a deleted volume.
+
+
+ Whether a volume is used by a &vmguest; can only be detected by
+ using the method described in
+ .
+
+
+
+
+ Attaching volumes to a &vmguest;
+
+ After you create a volume as described in
+ , you can
+ attach it to a virtual machine and use it as a hard disk:
+
+&prompt.user;virsh attach-disk DOMAIN SOURCE_IMAGE_FILE TARGET_DISK_DEVICE
+
+ For example:
+
+&prompt.user;virsh attach-disk sles12sp3 /virt/images/example_disk.qcow2 sda2
+
+ To check if the new disk is attached, inspect the result of the
+ virsh dumpxml command:
+
+&prompt.root;virsh dumpxml sles12sp3
+[...]
+<disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/virt/images/example_disk.qcow2'/>
+ <backingStore/>
+ <target dev='sda2' bus='scsi'/>
+ <alias name='scsi0-0-0'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+</disk>
+[...]
+
+ Hotplug or persistent change
+
+ You can attach disks to both active and inactive domains. The
+ attachment is controlled by the and
+ options:
+
+
+
+
+
+
+ Hotplugs the disk to an active domain. The attachment is not
+ saved in the domain configuration. Using
+ on an inactive domain is an error.
+
+
+
+
+
+
+
+ Changes the domain configuration persistently. The attached
+ disk is then available after the next domain start.
+
+
+
+
+
+
+
+ Hotplugs the disk and adds it to the persistent domain
+ configuration.
+
+
+
+
+
+ virsh attach-device
+
+ virsh attach-device is the more generic form
+ of virsh attach-disk. You can use it to attach
+ other types of devices to a domain.
+
+
+
+
+
+ Detaching volumes from a &vmguest;
+
+ To detach a disk from a domain, use virsh
+ detach-disk:
+
+&prompt.root;virsh detach-disk DOMAINTARGET_DISK_DEVICE
+
+ For example:
+
+&prompt.root;virsh detach-disk sles12sp3 sda2
+
+ You can control the attachment with the and
+ options as described in
+ .
+
+
+
+
+
+ Managing storage with &vmm;
+
+ The &vmm; provides a graphical interface—the Storage
+ Manager—to manage storage volumes and pools. To access it, either
+ right-click a connection and choose Details, or
+ highlight a connection and choose Edit
+ Connection Details. Select the
+ Storage tab.
+
+
+
+
+
+
+
+
+
+
+
+
+ Adding a storage pool
+
+ To add a storage pool, proceed as follows:
+
+
+
+
+ Click Add in the bottom left corner. The
+ dialog Add a New Storage Pool appears.
+
+
+
+
+ Provide a Name for the pool (consisting of
+ only alphanumeric characters and _,
+ - or .) and select a
+ Type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Specify the required details below. They depend on the type of
+ pool you are creating.
+
+
+
+ ZFS pools are not supported.
+
+
+
+
+ Typedir
+
+
+
+
+ Target Path: specify an existing
+ directory.
+
+
+
+
+
+
+ Typedisk
+
+
+
+
+ Format: format of the device's
+ partition table. Using auto should
+ normally work. If not, get the required format by
+ running the command parted
+ on the &vmhost;.
+
+
+
+
+ Source Path: path to the device. It
+ is recommended to use a device name from
+ /dev/disk/by-* rather than the
+ simple
+ /dev/sdX,
+ since the latter can change, for example, when adding
+ or removing hard disks. You need to specify the path
+ that resembles the whole disk, not a partition on the
+ disk (if existing).
+
+
+
+
+
+
+ Typefs
+
+
+
+
+ Target Path: mount point on the
+ &vmhost; file system.
+
+
+
+
+ Format: file system format of the
+ device. The default value auto
+ should work.
+
+
+
+
+ Source Path: path to the device
+ file. It is recommended to use a device name from
+ /dev/disk/by-* rather than
+ /dev/sdX,
+ because the latter can change, for example, when adding
+ or removing hard disks.
+
+
+
+
+
+
+ Typeiscsi
+
+
+ Get the necessary data by running the following command on
+ the &vmhost;:
+
+&prompt.sudo;iscsiadm --mode node
+
+ It returns a list of iSCSI volumes with the following
+ format. The elements in bold text are required:
+
+IP_ADDRESS:PORT,TPGT TARGET_NAME_(IQN)
+
+
+
+ Target Path: the directory
+ containing the device file. Use
+ /dev/disk/by-path (default) or
+ /dev/disk/by-id.
+
+
+
+
+ Host Name: host name or IP address
+ of the iSCSI server.
+
+
+
+
+ Source IQN: the iSCSI target name
+ (iSCSI Qualified Name).
+
+
+
+
+ Initiator IQN: the iSCSI initiator
+ name.
+
+
+
+
+
+
+ Typelogical
+
+
+
+
+ Volgroup Name: specify the device
+ path of an existing volume group.
+
+
+
+
+
+
+ Typempath
+
+
+
+
+ Target Path: support for
+ multipathing is currently limited to making all
+ multipath devices available. Therefore, specify an
+ arbitrary string here. The path is required, otherwise
+ the XML parser fails.
+
+
+
+
+
+
+ Typenetfs
+
+
+
+
+ Target Path: mount point on the
+ &vmhost; file system.
+
+
+
+
+ Host Name: IP address or host name
+ of the server exporting the network file system.
+
+
+
+
+ Source Path: directory on the server
+ that is being exported.
+
+
+
+
+
+
+ Typerbd
+
+
+
+
+ Host Name: host name of the server
+ with an exported RADOS block device.
+
+
+
+
+ Source Name: name of the RADOS block
+ device on the server.
+
+
+
+
+
+
+ Typescsi
+
+
+
+
+ Target Path: directory containing
+ the device file. Use
+ /dev/disk/by-path (default) or
+ /dev/disk/by-id.
+
+
+
+
+ Source Path: name of the SCSI
+ adapter.
+
+
+
+
+
+
+
+ File browsing
+
+ Using the file browser by clicking Browse is
+ not possible when operating remotely.
+
+
+
+
+
+ Click Finish to add the storage pool.
+
+
+
+
+
+ Managing storage pools
+
+ &vmm;'s Storage Manager lets you create or delete volumes in a pool.
+ You may also temporarily deactivate or permanently delete existing
+ storage pools. Changing the basic configuration of a pool is
+ currently not supported by &suse;.
+
+
+ Starting, stopping, and deleting pools
+
+ The purpose of storage pools is to provide block devices located on
+ the &vmhost; that can be added to a &vmguest; when managing it from
+ remote. To make a pool temporarily inaccessible from remote, click
+ Stop in the bottom left corner of the Storage
+ Manager. Stopped pools are marked with State:
+ Inactive and are grayed out in the list pane. By default,
+ a newly created pool is automatically started On
+ Boot of the &vmhost;.
+
+
+ To start an inactive pool and make it available from remote again,
+ click Start in the bottom left corner of the
+ Storage Manager.
+
+
+ A pool's state does not affect attached volumes
+
+ Volumes from a pool attached to &vmguest;s are always available,
+ regardless of the pool's state (Active
+ (started) or Inactive (stopped)). The state of
+ the pool solely affects the ability to attach volumes to a
+ &vmguest; via remote management.
+
+
+
+ To permanently make a pool inaccessible, click
+ Delete in the bottom left corner of the Storage
+ Manager. You can only delete inactive pools. Deleting a pool does
+ not physically erase its contents on &vmhost;—it only deletes
+ the pool configuration. However, you need to be extra careful when
+ deleting pools, especially when deleting LVM volume group-based
+ pools:
+
+
+ Deleting storage pools
+
+ Deleting storage pools based on local file
+ system directories, local partitions or disks has no effect on
+ the availability of volumes from these pools currently attached
+ to &vmguest;s.
+
+
+ Volumes located in pools of type iSCSI, SCSI, LVM group or
+ Network Exported Directory become inaccessible from the &vmguest;
+ if the pool is deleted. Although the volumes themselves are not
+ deleted, the &vmhost; can no longer access the resources.
+
+
+ Volumes on iSCSI/SCSI targets or Network Exported Directory
+ become accessible again when creating an adequate new pool or
+ when mounting/accessing these resources directly from the host
+ system.
+
+
+ When deleting an LVM group-based storage pool, the LVM group
+ definition is erased and the LVM group no longer exists on the
+ host system. The configuration is not recoverable and all volumes
+ from this pool are lost.
+
+
+
+
+ Adding volumes to a storage pool
+
+ &vmm; lets you create volumes in all storage pools, except in pools
+ of types Multipath, iSCSI or SCSI. A volume in these pools is
+ equivalent to a LUN and cannot be changed from within &libvirt;.
+
+
+
+
+ A new volume can either be created using the Storage Manager or
+ while adding a new storage device to a &vmguest;. In either
+ case, select a storage pool from the left panel, then click
+ Create new volume.
+
+
+
+
+ Specify a Name for the image and choose an
+ image format.
+
+
+ &suse; currently only supports raw or
+ qcow2 images. The latter option is not
+ available on LVM group-based pools.
+
+
+ Next to Max Capacity, specify the maximum
+ size that the disk image is allowed to reach. Unless you are
+ working with a qcow2 image, you can also set
+ an amount for Allocation that should be
+ allocated initially. If the two values differ, a sparse image
+ file is created, which grows on demand.
+
+
+ For qcow2 images, you can use a
+ Backing Store (also called backing
+ file), which constitutes a base image. The newly
+ created qcow2 image then only records the
+ changes that are made to the base image.
+
+
+
+
+ Start the volume creation by clicking
+ Finish.
+
+
+
+
+
+ Deleting volumes from a storage pool
+
+ Deleting a volume can only be done from the Storage Manager, by
+ selecting a volume and clicking Delete Volume.
+ Confirm with Yes.
+
+
+ Volumes can be deleted even while in use
+
+ Volumes can be deleted even if they are currently used in an
+ active or inactive &vmguest;. There is no way to recover a
+ deleted volume.
+
+
+ Whether a volume is used by a &vmguest; is indicated in the
+ Used By column in the Storage Manager.
+
+
+
+
+
+
+
diff --git a/references/libvirt_managing.xml b/references/libvirt_managing.xml
new file mode 100644
index 000000000..4b7e9b732
--- /dev/null
+++ b/references/libvirt_managing.xml
@@ -0,0 +1,1270 @@
+
+
+ %entities;
+]>
+
+
+ Basic &vmguest; management
+
+
+
+ yes
+
+
+
+ 2025-06-19
+
+
+
+
+
+
+
+ Most management tasks, such as starting or stopping a &vmguest;, can either
+ be done using the graphical application &vmm; or on the command line using
+ virsh. Connecting to the graphical console via VNC is
+ only possible from a graphical user interface.
+
+
+ Managing &vmguest;s on a remote &vmhost;
+
+ If started on a &vmhost;, the &libvirt; tools &vmm;,
+ virsh, and virt-viewer can be used
+ to manage &vmguest;s on the host. However, it is also possible to manage
+ &vmguest;s on a remote &vmhost;. This requires configuring remote access
+ for &libvirt; on the host. For instructions, see
+ .
+
+
+ To connect to such a remote host with &vmm;, you need to set up a
+ connection as explained in
+ . If connecting to a
+ remote host using virsh or
+ virt-viewer, you need to specify a connection URI with
+ the parameter , for example, virsh -c
+ qemu+tls://&wsIIIname;/system.
+ The form of connection URI depends on the
+ connection type and the hypervisor—see
+ for details.
+
+
+ Examples in this chapter are all listed without a connection URI.
+
+
+
+ Listing &vmguest;s
+
+
+ The &vmguest; listing shows all &vmguest;s managed by &libvirt; on a
+ &vmhost;.
+
+
+
+ Listing &vmguest;s with &vmm;
+
+ The main window of the &vmm; lists all &vmguest;s for each &vmhost; it
+ is connected to. Each &vmguest; entry contains the machine's name, its
+ status (Running, Paused, or
+ Shutoff) displayed as an icon and literally, and a
+ CPU usage bar.
+
+
+
+
+ Listing &vmguest;s with virsh
+
+ Use the command virsh to get a
+ list of &vmguest;s:
+
+
+
+ List all running guests
+
+&prompt.user;virsh list
+
+
+
+ List all running and inactive guests
+
+&prompt.user;virsh list --all
+
+
+
+
+ For more information and further options, see virsh help
+ list or man 1 virsh.
+
+
+
+
+ Accessing the &vmguest; via console
+
+
+ &vmguest;s can be accessed via a VNC connection (graphical console) or,
+ if supported by the guest operating system, via a serial console.
+
+
+
+ Opening a graphical console
+
+ Opening a graphical console to a &vmguest; lets you interact with the
+ machine like a physical host via a VNC connection. If accessing the VNC
+ server requires authentication, you are prompted to enter a user name
+ (if applicable) and a password.
+
+
+ When you click into the VNC console, the cursor is
+ grabbed and cannot be used outside the console anymore.
+ To release it, press
+ .
+
+
+ Seamless (absolute) cursor movement
+
+ To prevent the console from grabbing the cursor and to enable
+ seamless cursor movement, add a tablet input device to the &vmguest;.
+
+
+
+
+ Certain key combinations such as
+
+ are interpreted by the host
+ system and are not passed to the &vmguest;. To pass such key
+ combinations to a &vmguest;, open the Send Key menu
+ from the VNC window and choose the desired key combination entry. The
+ Send Key menu is only available when using &vmm; and
+ virt-viewer. With &vmm;, you can alternatively use
+ the sticky key feature as explained in
+ .
+
+
+ Supported VNC viewers
+
+ Principally all VNC viewers can connect to the console of a
+ &vmguest;. However, if you are using SASL authentication and/or
+ TLS/SSL connection to access the guest, the options are limited.
+ Common VNC viewers such as tightvnc or
+ tigervnc support neither SASL authentication nor
+ TLS/SSL.
+
+
+
+ Opening a graphical console with &vmm;
+
+
+
+ In the &vmm;, right-click a &vmguest; entry.
+
+
+
+
+ Choose Open from the pop-up menu.
+
+
+
+
+
+ Opening a graphical console with virt-viewer
+
+ virt-viewer is a simple VNC viewer with added
+ functionality for displaying &vmguest; consoles. For example, it can
+ be started in wait mode, where it waits for a
+ &vmguest; to start before it connects. It also supports automatically
+ reconnecting to a &vmguest; that is rebooted.
+
+
+ virt-viewer addresses &vmguest;s by name, by ID or
+ by UUID. Use virsh to
+ get this data.
+
+
+ To connect to a guest that is running or paused, use either the ID,
+ UUID or name. &vmguest;s that are shut off do not have an
+ ID—you can only connect to them by UUID or name.
+
+
+
+ Connect to guest with the ID 8
+
+&prompt.user;virt-viewer 8
+
+
+
+ Connect to the inactive guest named sles12; the connection window opens once the guest starts
+
+&prompt.user;virt-viewer --wait sles12
+
+ With the option, the connection is
+ upheld even if the &vmguest; is not running at the moment. When
+ the guest starts, the viewer is launched.
+
+
+
+
+
+ For more information, see virt-viewer
+ or man 1 virt-viewer.
+
+
+ Password input on remote connections with SSH
+
+ When using virt-viewer to open a connection to a
+ remote host via SSH, the SSH password needs to be entered twice.
+ The first time for authenticating with &libvirt;, the second time
+ for authenticating with the VNC server. The second password needs
+ to be provided on the command line where virt-viewer was started.
+
+
+
+
+
+
+ Opening a serial console
+
+ Accessing the graphical console of a virtual machine requires a
+ graphical environment on the client accessing the &vmguest;. As an
+ alternative, virtual machines managed with libvirt can also be accessed
+ from the shell via the serial console and virsh. To
+ open a serial console to a &vmguest; named sles12, run
+ the following command:
+
+&prompt.user;virsh console sles12
+
+ virsh console takes two optional flags:
+ ensures exclusive access to the console,
+ disconnects any existing sessions before
+ connecting. Both features need to be supported by the guest operating
+ system.
+
+
+ Being able to connect to a &vmguest; via serial console requires that
+ the guest operating system supports serial console access and is
+ properly configured. Refer to the guest operating system manual for more
+ information.
+
+
+ Enabling serial console access for &sle; and &opensuse; guests
+
+ Serial console access in &sle; and &opensuse; is disabled by default.
+ To enable it, proceed as follows:
+
+
+
+ &slsa; 15, 16 and &opensuse;
+
+
+ Add console=ttyS0 to the Kernel Command Line Parameter.
+
+
+
+
+ &slsa; 11
+
+
+ Add console=ttyS0 to the Kernel Command Line Parameter.
+ Additionally, edit
+ /etc/inittab and uncomment the line with
+ the following content:
+
+#S0:12345:respawn:/sbin/agetty -L 9600 ttyS0 vt102
+
+
+
+
+
+
+
+ Changing a &vmguest;'s state: start, stop, pause
+
+
+ Starting, stopping or pausing a &vmguest; can be done with either &vmm;
+ or virsh. You can also configure a &vmguest; to be
+ automatically started when booting the &vmhost;.
+
+
+
+ When shutting down a &vmguest;, you may either shut it down gracefully,
+ or force the shutdown. The latter is equivalent to pulling the power plug
+ on a physical host and is only recommended if there are no alternatives.
+ Forcing a shutdown may cause file system corruption and loss of data on
+ the &vmguest;.
+
+
+
+ Graceful shutdown
+
+ To be able to perform a graceful shutdown, the &vmguest; must be
+ configured to support . If you have
+ created the guest with the &vmm;, ACPI should be available in the
+ &vmguest;.
+
+
+ Depending on the guest operating system, availability of ACPI may not
+ be sufficient to perform a graceful shutdown. It is strongly
+ recommended to test shutting down and rebooting a guest before using it
+ in production. &opensuse; or &sled;, for example, can require &pk;
+ authorization for shutdown and reboot. Make sure this policy is turned
+ off on all &vmguest;s.
+
+
+ If ACPI was enabled during a Windows XP/Windows Server 2003 guest
+ installation, turning it on in the &vmguest; configuration only is not
+ sufficient. For more information, see:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Regardless of the &vmguest;'s configuration, a graceful shutdown is
+ always possible from within the guest operating system.
+
+
+
+
+ Changing a &vmguest;'s state with &vmm;
+
+ Changing a &vmguest;'s state can be done either from &vmm;'s main
+ window, or from a VNC window.
+
+
+ State change from the &vmm; window
+
+
+ Right-click a &vmguest; entry.
+
+
+
+
+ Choose Run, Pause, or one of
+ the Shutdown options from the pop-up menu.
+
+
+
+
+ State change from the VNC window
+
+
+ Open a VNC Window as described in
+ .
+
+
+
+
+ Choose Run, Pause, or one of
+ the Shut Down options either from the toolbar or
+ from the Virtual Machine menu.
+
+
+
+
+ Automatically starting a &vmguest;
+
+ You can automatically start a guest when the &vmhost; boots. This
+ feature is not enabled by default and needs to be enabled for each
+ &vmguest; individually. There is no way to activate it globally.
+
+
+
+
+ Double-click the &vmguest; entry in &vmm; to open its console.
+
+
+
+
+ Choose View
+ Details to open the &vmguest;
+ configuration window.
+
+
+
+
+ Choose Boot Options and check Start
+ virtual machine on host boot up.
+
+
+
+
+ Save the new configuration with Apply.
+
+
+
+
+
+
+
+ Changing a &vmguest;'s state with virsh
+
+ In the following examples, the state of a &vmguest; named
+ sles12 is changed.
+
+
+
+ Start
+
+&prompt.user;virsh start sles12
+
+
+
+ Pause
+
+&prompt.user;virsh suspend sles12
+
+
+
+ Resume (a suspended &vmguest;)
+
+&prompt.user;virsh resume sles12
+
+
+
+ Reboot
+
+&prompt.user;virsh reboot sles12
+
+
+
+ Graceful shutdown
+
+&prompt.user;virsh shutdown sles12
+
+
+
+ Force shutdown
+
+&prompt.user;virsh destroy sles12
+
+
+
+ Turn on automatic start
+
+&prompt.user;virsh autostart sles12
+
+
+
+ Turn off automatic start
+
+&prompt.user;virsh autostart --disable sles12
+
+
+
+
+
+
+ Saving and restoring the state of a &vmguest;
+
+
+ Saving a &vmguest; preserves the exact state of the guest’s memory. The
+ operation is similar to hibernating a computer. A
+ saved &vmguest; can be quickly restored to the same running condition
+ prior to the save operation.
+
+
+
+ When saved, the &vmguest; is paused, its current memory state is saved to
+ a file, and then the guest is stopped. The operation does not make a copy
+ of any portion of the &vmguest;’s virtual disk. The time required to save
+ the virtual machine depends on the amount of memory allocated. The
+ &vmguest;'s resources are returned to the &vmhost; following a successful
+ save operation.
+
+
+
+ The restore operation loads a &vmguest;’s previously saved memory state
+ file and starts it. The guest is not booted but instead resumed at the
+ point where it was previously saved. The operation is similar to coming
+ out of hibernation.
+
+
+
+ &libvirt; supports several save file formats. The default format is called
+ and consists of a sequential stream of &vmguest;
+ memory pages. The sequential layout of the format is
+ not well suited for multiple readers and writers.
+
+
+
+ In addition to the save file format, &libvirt; also
+ supports several compressed formats: ,
+ , ,
+ and . Similar to the format, the
+ compressed formats consist of a sequential stream of &vmguest; memory
+ pages, but are compressed by the named compression algorithm before being written
+ to or read from the save file. These formats conserve space on the
+ device holding the save file, but increase save/restore times and host
+ CPU usage.
+
+
+
+ The save file format uses pre-calculated, fixed
+ offsets within the save file to read/write the &vmguest; memory pages. This
+ results in a save file that is roughly the logical size of the &vmguest;'s
+ memory, although its on-disk size depends on the &vmguest;'s actual memory
+ usage. With fixed offsets for the &vmguest; memory pages, the
+ format is well suited to support multiple readers
+ and writers, which may improve save and restore times for &vmguest;s
+ with large memory allocations.
+
+
+
+ The default save file format can be changed with the
+ setting in
+ /etc/libvirt/qemu.conf. The format can also be
+ specified when performing a save operation using virsh.
+ See for more information on
+ save and restore with virsh.
+
+
+
+ Because the &vmguest;'s state is saved to a file, make sure
+ there is enough space on the device hosting the save file. When
+ using the save file format, the logical save
+ file size will be approximately the same as the &vmguest; memory
+ allocation. However, the actual on-disk file size is usually smaller and
+ depends on the &vmguest; memory usage. Unused memory in the &vmguest;
+ is not written to the save file, hence the term sparse.
+
+
+
+ For the save file format, the logical
+ and on-disk file size are equivalent, and both depend on the
+ &vmguest;'s memory usage. Whether using the or
+ format, the on-disk save file size in megabytes
+ can be estimated by running the following command in the &vmguest;:
+
+
+&prompt.user;free -mh | awk '/^Mem:/ {print $3}'
+
+
+ The compressed formats will result in a smaller on-disk size, depending
+ on the efficiency of the specified compression algorithm.
+
+
+
+ Always restore saved guests
+
+ After a successful save operation, booting or starting the &vmguest;
+ by means other than a restore operation will render the saved state
+ file obsolete. The save file may contain file system data
+ that has not been flushed to disk. Attempting to restore the saved
+ state after the &vmguest; has executed by other means can result
+ in file system corruption.
+
+
+ Always use the same application when saving and restoring &vmguest;s.
+ For example, if virsh is used to save a &vmguest;,
+ do not restore it using &vmm;. In this case, make sure to restore
+ using virsh.
+
+
+
+
+ Synchronize &vmguest;'s time after restoring it
+
+ If you restore the &vmguest; after a long pause (hours) since it was
+ saved, its time synchronization service, for example,
+ &chronyd;, may refuse to synchronize its time. In this case,
+ manually synchronize &vmguest;'s time. For example, for &kvm; hosts,
+ you can use the &qemu; guest agent and instruct the guest with the
+ guest-set-time.
+
+
+
+
+
+ Saving/restoring with &vmm;
+
+ Saving a &vmguest;
+
+
+ Open a VNC connection window to a &vmguest;. Make sure the guest is
+ running.
+
+
+
+
+ Choose Virtual Machine
+ ShutdownSave.
+
+
+
+
+ Restoring a &vmguest;
+
+
+ Open a VNC connection window to a &vmguest;. Make sure the guest is
+ not running.
+
+
+
+
+ Choose Virtual Machine
+ Restore.
+
+
+ If the &vmguest; was previously saved using &vmm;, you are not
+ offered an option to Run the guest. However,
+ note the caveats on machines saved with virsh
+ outlined in .
+
+
+
+
+
+
+ Saving and restoring with virsh
+
+ &libvirt; provides more control over the save and restore operations
+ than &vmm;. virsh save and virsh
+ restore support several options to modify the behavior of the
+ operations. In the most basic form, a &vmguest; is saved by providing
+ its name, ID, or UUID and a file name. For example:
+
+&prompt.user;virsh save openSUSE-Leap /virtual/saves/openSUSE-Leap.vmsav
+
+ A basic &vmguest; restore operation only requires specifying the save
+ file name. For example:
+
+&prompt.user;virsh restore /virtual/saves/openSUSE-Leap.vmsav
+
+ As &vmguest; memory size increases, save and restore operations may
+ require additional options to attain satisfactory transfer rates,
+ particularly when the save image files are backed by high throughput
+ storage. The &vmhost; file system cache is often counterproductive
+ in these scenarios and should be avoided with the
+ option. For example:
+
+&prompt.user;virsh save --bypass-cache openSUSE-Leap /virtual/saves/openSUSE-Leap.vmsav
+&prompt.user;virsh restore --bypass-cache /virtual/saves/openSUSE-Leap.vmsav
+
+ The time required to save and restore &vmguest;s to high throughput
+ storage can be improved by using multiple channels to write and read
+ the &vmguest; memory pages. As noted in
+ , the
+ image format is required to use multiple channels. When selecting the
+ number of channels, care must be taken to ensure the operation does not
+ adversely affect other workloads running on the &vmhost;. When the
+ &vmhost; resources are statically partitioned, general advice would be
+ to use the same number of channels as physical CPUs dedicated to the
+ &vmguest;. The vCPUs of a &vmguest; are stopped at the start of a
+ save operation, thus it would be safe to use those CPU resources
+ to save &vmguest; memory pages.
+
+
+ The following examples save and restore a &vmguest; using 4 channels,
+ while also bypassing the &vmhost; file system cache:
+
+&prompt.user;virsh save --bypass-cache --image-format sparse --parallel-channels 4 openSUSE-Leap /virtual/saves/openSUSE-Leap.vmsav
+&prompt.user;virsh restore --bypass-cache --parallel-channels 4 /virtual/saves/openSUSE-Leap.vmsav
+
+ The image format is encoded in the save image file and does not need to
+ be specified during a restore operation.
+
+
+ For more information on save/restore and the supported options, see
+ virsh help save, virsh help restore
+ or man 1 virsh.
+
+
+
+
+ Creating and managing snapshots
+
+
+ &vmguest; snapshots are snapshots of the complete virtual machine
+ including the state of CPU, RAM, devices and the content of all writable
+ disks. To use virtual machine snapshots, all the attached hard disks need
+ to use the qcow2 disk image format, and at least one of them needs to be
+ writable.
+
+
+
+ Snapshots let you restore the state of the machine at a particular point
+ in time. This is useful when undoing a faulty configuration or the
+ installation of a lot of packages. After starting a snapshot that was
+ created while the &vmguest; was shut off, you need to boot it. Any
+ changes written to the disk afterward are lost when starting the
+ snapshot.
+
+
+
+
+ Snapshots are supported on &kvm; &vmhost;s only.
+
+
+
+
+ Terminology
+
+ There are several specific terms used to describe the types of
+ snapshots:
+
+
+
+ Internal snapshots
+
+
+ Snapshots that are saved into the qcow2 file of the original
+ &vmguest;. The file holds both the saved state of the snapshot
+ and the changes made since the snapshot was taken. The main
+ advantage of internal snapshots is that they are all stored in
+ one file and therefore it is easy to copy or move them across
+ multiple machines.
+
+
+
+
+ External snapshots
+
+
+ When creating an external snapshot, the original qcow2 file is
+ saved and made read-only, while a new qcow2 file is created to
+ hold the changes. The original file is sometimes called a
+ backing or base file,
+ while the new file with all the changes is called an
+ overlay or derived
+ file. External snapshots are useful when performing backups of
+ &vmguest;s. However, external snapshots are not supported by
+ &vmm;, and cannot be deleted by virsh
+ directly.
+
+
+
+
+ Live snapshots
+
+
+ Snapshots created when the original &vmguest; is running.
+ Internal live snapshots support saving the devices, and memory
+ and disk states, while external live snapshots with
+ virsh support saving either the memory state,
+ or the disk state, or both.
+
+
+
+
+ Offline snapshots
+
+
+ Snapshots created from a &vmguest; that is shut off. This ensures
+ data integrity as all the guest's processes are stopped and no
+ memory is in use.
+
+
+
+
+
+
+
+ Creating and managing snapshots with &vmm;
+
+ Internal snapshots only
+
+ &vmm; supports only internal snapshots, either live or offline.
+
+
+
+ To open the snapshot management view in &vmm;, open the VNC window as
+ described in .
+ Now either choose View
+ Snapshots or click Manage VM
+ Snapshots in the toolbar.
+
+
+
+
+
+
+
+
+
+
+
+
+ The list of existing snapshots for the chosen &vmguest; is displayed in
+ the left-hand part of the window. The snapshot that was last started is
+ marked with a green tick. The right-hand part of the window shows
+ details of the snapshot currently marked in the list. These details
+ include the snapshot's title and time stamp, the state of the &vmguest;
+ at the time the snapshot was taken and a description. Snapshots of
+ running guests also include a screenshot. The
+ Description can be changed directly from this view.
+ Other snapshot data cannot be changed.
+
+
+ Creating a snapshot
+
+ To take a new snapshot of a &vmguest;, proceed as follows:
+
+
+
+
+ Optionally, shut down the &vmguest; to create an offline
+ snapshot.
+
+
+
+
+ Click Add in the bottom left corner of the VNC
+ window.
+
+
+ The window Create Snapshot opens.
+
+
+
+
+ Provide a Name and, optionally, a description.
+ The name cannot be changed after the snapshot has been taken. To
+ be able to identify the snapshot later easily, use a
+ speaking name.
+
+
+
+
+ Confirm with Finish.
+
+
+
+
+
+ Deleting a snapshot
+
+ To delete a snapshot of a &vmguest;, proceed as follows:
+
+
+
+
+ Click Delete in the bottom left corner of the
+ VNC window.
+
+
+
+
+ Confirm the deletion with Yes.
+
+
+
+
+
+ Starting a snapshot
+
+ To start a snapshot, proceed as follows:
+
+
+
+
+ Click Run in the bottom left corner of the VNC
+ window.
+
+
+
+
+ Confirm the start with Yes.
+
+
+
+
+
+
+
+ Creating and managing snapshots with virsh
+
+ To list all existing snapshots for a domain
+ (sle-ha-node1 in the following example), run the
+ snapshot-list command:
+
+&prompt.user;virsh snapshot-list --domain sle-ha-node1
+ Name Creation Time State
+------------------------------------------------------------
+ sleha_12_sp2_b2_two_node_cluster 2016-06-06 15:04:31 +0200 shutoff
+ sleha_12_sp2_b3_two_node_cluster 2016-07-04 14:01:41 +0200 shutoff
+ sleha_12_sp2_b4_two_node_cluster 2016-07-14 10:44:51 +0200 shutoff
+ sleha_12_sp2_rc3_two_node_cluster 2016-10-10 09:40:12 +0200 shutoff
+ sleha_12_sp2_gmc_two_node_cluster 2016-10-24 17:00:14 +0200 shutoff
+ sleha_12_sp3_gm_two_node_cluster 2017-08-02 12:19:37 +0200 shutoff
+ sleha_12_sp3_rc1_two_node_cluster 2017-06-13 13:34:19 +0200 shutoff
+ sleha_12_sp3_rc2_two_node_cluster 2017-06-30 11:51:24 +0200 shutoff
+ sleha_15_b6_two_node_cluster 2018-02-07 15:08:09 +0100 shutoff
+ sleha_15_rc1_one-node 2018-03-09 16:32:38 +0100 shutoff
+
+ The snapshot that was last started is shown with the
+ snapshot-current command:
+
+&prompt.user;virsh snapshot-current --domain admin_server
+Basic installation incl. SMT for CLOUD4
+
+
+ Details about a particular snapshot can be obtained by running the
+ snapshot-info command:
+
+&prompt.user;virsh snapshot-info --domain admin_server \
+ --name "Basic installation incl. SMT for CLOUD4"
+Name: Basic installation incl. SMT for CLOUD4
+Domain: admin_server
+Current: yes
+State: shutoff
+Location: internal
+Parent: Basic installation incl. SMT for CLOUD3-HA
+Children: 0
+Descendants: 0
+Metadata: yes
+
+
+ Creating internal snapshots
+
+ To take an internal snapshot of a &vmguest;, either live or
+ offline, use the snapshot-create-as command as
+ follows:
+
+&prompt.user;virsh snapshot-create-as --domain admin_server --name "Snapshot 1" \
+--description "First snapshot"
+
+
+
+ Domain name. Mandatory.
+
+
+
+
+ Name of the snapshot. It is recommended to use a speaking
+ name, since that makes it easier to identify the
+ snapshot. Mandatory.
+
+
+
+
+ Description for the snapshot. Optional.
+
+
+
+
+
+ Creating external snapshots
+
+ With virsh, you can take external snapshots of the
+ guest's memory state, disk state, or both.
+
+
+ To take both live and offline external snapshots of the guest's disk,
+ specify the option:
+
+&prompt.user;virsh snapshot-create-as --domain admin_server --name \
+ "Offline external snapshot" --disk-only
+
+ You can specify the option to control how
+ the external files are created:
+
+&prompt.user;virsh snapshot-create-as --domain admin_server --name \
+ "Offline external snapshot" \
+ --disk-only --diskspec vda,snapshot=external,file=/path/to/snapshot_file
+
+ To take a live external snapshot of the guest's memory, specify the
+ and options:
+
+&prompt.user;virsh snapshot-create-as --domain admin_server --name \
+ "Offline external snapshot" --live \
+ --memspec snapshot=external,file=/path/to/snapshot_file
+
+ To take a live external snapshot of both the guest's disk and memory
+ states, combine the ,
+ , and options:
+
+&prompt.user;virsh snapshot-create-as --domain admin_server --name \
+ "Offline external snapshot" --live \
+ --memspec snapshot=external,file=/path/to/snapshot_file
+ --diskspec vda,snapshot=external,file=/path/to/snapshot_file
+
+ Refer to the SNAPSHOT COMMANDS section in
+ man 1 virsh for more details.
+
+
+
+ Deleting a snapshot
+
+ External snapshots cannot be deleted with virsh.
+ To delete an internal snapshot of a &vmguest; and restore the disk
+ space it occupies, use the snapshot-delete
+ command:
+
+&prompt.user;virsh snapshot-delete --domain admin_server --snapshotname "Snapshot 2"
+
+
+ Starting a snapshot
+
+ To start a snapshot, use the snapshot-revert
+ command:
+
+&prompt.user;virsh snapshot-revert --domain admin_server --snapshotname "Snapshot 1"
+
+ To start the current snapshot (the one the &vmguest; was started
+ off), it is sufficient to use rather than
+ specifying the snapshot name:
+
+&prompt.user;virsh snapshot-revert --domain admin_server --current
+
+
+
+
+ Deleting a &vmguest;
+
+
+ By default, deleting a &vmguest; using virsh removes
+ only its XML configuration. Since attached storage is not deleted by
+ default, you can reuse it with another &vmguest;. With &vmm;, you can
+ also delete a guest's storage files as well.
+
+
+
+ Deleting a &vmguest; with &vmm;
+
+
+
+ In the &vmm;, right-click a &vmguest; entry.
+
+
+
+
+ From the context menu, choose Delete.
+
+
+
+
+ A confirmation window opens. Clicking Delete
+ permanently erases the &vmguest;. The deletion is not recoverable.
+
+
+ You can also permanently delete the guest's virtual disk by
+ activating Delete Associated Storage Files. The
+ deletion is not recoverable either.
+
+
+
+
+
+
+ Deleting a &vmguest; with virsh
+
+ To delete a &vmguest;, it needs to be shut down first. It is not
+ possible to delete a running guest. For information on shutting down,
+ see .
+
+
+ To delete a &vmguest; with virsh, run
+ virsh undefine
+ VM_NAME.
+
+&prompt.user;virsh undefine sles12
+
+ There is no option to automatically delete the attached storage files.
+ If they are managed by libvirt, delete them as described in
+ .
+
+
+
+
+ Monitoring
+
+
+
+
+ Monitoring with &vmm;
+
+ After starting &vmm; and connecting to the &vmhost;, a CPU usage graph
+ of all the running guests is displayed.
+
+
+ It is also possible to get information about disk and network usage
+ with this tool, however, you must first activate this in
+ Preferences:
+
+
+
+
+ Run virt-manager.
+
+
+
+
+ Select Edit
+ Preferences.
+
+
+
+
+ Change the tab from General to
+ Polling.
+
+
+
+
+ Activate the check boxes for the kind of activity you want to see:
+ Poll Disk I/O, Poll Network
+ I/O, and Poll Memory stats.
+
+
+
+
+ If desired, also change the update interval using Update
+ status every n seconds.
+
+
+
+
+ Close the Preferences dialog.
+
+
+
+
+ Activate the graphs that should be displayed under
+ ViewGraph.
+
+
+
+
+ Afterward, the disk and network statistics are also displayed in the
+ main window of the &vmm;.
+
+
+ More precise data is available from the VNC window. Open a VNC window
+ as described in .
+ Choose Details from the toolbar or the
+ View menu. The statistics are displayed from the
+ Performance entry of the left-hand tree menu.
+
+
+
+
+ Monitoring with virt-top
+
+ virt-top is a command-line tool similar to the
+ well-known process monitoring tool top.
+ virt-top uses libvirt and therefore is capable of
+ showing statistics for &vmguest;s running on different hypervisors.
+
+
+ By default virt-top shows statistics for all running
+ &vmguest;s. Among the data that is displayed is the percentage of
+ memory used (%MEM) and CPU (%CPU)
+ and the uptime of the guest (TIME). The data is
+ updated regularly (every three seconds by default). The following shows
+ the output on a &vmhost; with seven &vmguest;s, four of them inactive:
+
+virt-top 13:40:19 - x86_64 8/8CPU 1283MHz 16067MB 7.6% 0.5%
+7 domains, 3 active, 3 running, 0 sleeping, 0 paused, 4 inactive D:0 O:0 X:0
+CPU: 6.1% Mem: 3072 MB (3072 MB by guests)
+
+ ID S RDRQ WRRQ RXBY TXBY %CPU %MEM TIME NAME
+ 7 R 123 1 18K 196 5.8 6.0 0:24.35 sled12_sp1
+ 6 R 1 0 18K 0 0.2 6.0 0:42.51 sles12_sp1
+ 5 R 0 0 18K 0 0.1 6.0 85:45.67 opensuse_leap
+ - (Ubuntu_1410)
+ - (debian_780)
+ - (fedora_21)
+ - (sles11sp3)
+
+ By default the output is sorted by ID. Use the following key
+ combinations to change the sort field:
+
+ P: CPU
+ usage
+ M:
+ total memory allocated by the guest
+ T: time
+ I: ID
+
+
+
+ To use any other field for sorting, press
+ F and
+ select a field from the list. To toggle the sort order, use
+ R.
+
+
+ virt-top also supports different views on the
+ &vmguest;s data, which can be changed on-the-fly by pressing the
+ following keys:
+
+ 0: default view, 1: show physical CPUs, 2: show network interfaces, 3: show virtual disks
+
+
+ virt-top supports more hot keys to change the view
+ of the data and many command line switches that affect the behavior of
+ the program. For more information, see man 1
+ virt-top.
+
+
+
+
+ Monitoring with kvm_stat
+
+ kvm_stat can be used to trace &kvm; performance
+ events. It monitors /sys/kernel/debug/kvm, so it
+ needs the debugfs to be mounted. On &productname; it should be mounted
+ by default. In case it is not mounted, use the following command:
+
+&prompt.sudo;mount -t debugfs none /sys/kernel/debug
+
+ kvm_stat can be used in three different modes:
+
+kvm_stat # update in 1 second intervals
+kvm_stat -1 # 1 second snapshot
+kvm_stat -l > kvmstats.log # update in 1 second intervals in log format
+ # can be imported to a spreadsheet
+
+ Typical output of kvm_stat
+kvm statistics
+
+ efer_reload 0 0
+ exits 11378946 218130
+ fpu_reload 62144 152
+ halt_exits 414866 100
+ halt_wakeup 260358 50
+ host_state_reload 539650 249
+ hypercalls 0 0
+ insn_emulation 6227331 173067
+ insn_emulation_fail 0 0
+ invlpg 227281 47
+ io_exits 113148 18
+ irq_exits 168474 127
+ irq_injections 482804 123
+ irq_window 51270 18
+ largepages 0 0
+ mmio_exits 6925 0
+ mmu_cache_miss 71820 19
+ mmu_flooded 35420 9
+ mmu_pde_zapped 64763 20
+ mmu_pte_updated 0 0
+ mmu_pte_write 213782 29
+ mmu_recycled 0 0
+ mmu_shadow_zapped 128690 17
+ mmu_unsync 46 -1
+ nmi_injections 0 0
+ nmi_window 0 0
+ pf_fixed 1553821 857
+ pf_guest 1018832 562
+ remote_tlb_flush 174007 37
+ request_irq 0 0
+ signal_exits 0 0
+ tlb_flush 394182 148
+
+
+ See
+
+ for further information on how to interpret these values.
+
+
+
+
diff --git a/references/libvirt_migrating_vms.xml b/references/libvirt_migrating_vms.xml
new file mode 100644
index 000000000..24a8af765
--- /dev/null
+++ b/references/libvirt_migrating_vms.xml
@@ -0,0 +1,611 @@
+
+
+ %entities;
+]>
+
+
+ Migrating &vmguest;s
+
+
+
+ yes
+
+
+
+ 2025-06-12
+
+
+
+
+
+
+
+ One of the major advantages of virtualization is that &vmguest;s are
+ portable. When a &vmhost; needs maintenance, or when the host becomes
+ overloaded, the guests can be moved to another &vmhost;. &kvm;
+ even supports live migrations, during which the &vmguest; is
+ constantly available.
+
+
+ Types of migration
+
+
+ Depending on the required scenario, there are three ways you can migrate
+ virtual machines (VMs).
+
+
+
+
+ Live migration
+
+
+ The source VM continues to run while its configuration and memory are
+ transferred to the target host. When the transfer is complete, the
+ source VM is suspended and the target VM is resumed.
+
+
+ Live migration is useful for VMs that need to be online without any
+ downtime.
+
+
+
+ VMs experiencing heavy I/O load or frequent memory page writes are
+ challenging to live migrate. In such cases, consider using
+ non-live or offline migration.
+
+
+
+
+
+ Non-live migration
+
+
+ The source VM is suspended and its configuration and memory are
+ transferred to the target host. Then the target VM is resumed.
+
+
+ Non-live migration is more reliable than live migration, although it
+ creates downtime for the VM. If downtime is tolerable, non-live
+ migration can be an option for VMs that are difficult to live
+ migrate.
+
+
+
+
+ Offline migration
+
+
+ The VM definition is transferred to the target host. The source VM
+ is not stopped and the target VM is not resumed.
+
+
+ Offline migration can be used to migrate inactive VMs.
+
+
+
+ With offline migration, the option must be used.
+
+
+
+
+
+
+
+ Migration requirements
+
+
+ To successfully migrate a &vmguest; to another &vmhost;, the following
+ requirements need to be met:
+
+
+
+
+
+ The source and target systems must have the same architecture.
+
+
+
+
+ Storage devices must be accessible from both machines, for example,
+ via NFS or iSCSI. For more information, see
+ .
+
+
+ This is also true for CD-ROM or floppy images that are connected
+ during the move. However, you can disconnect them before the move as
+ described in .
+
+
+
+
+ &libvirtd; needs to run on both &vmhost;s and you must be able to open
+ a remote &libvirt; connection between the target and the source host
+ (or vice versa). Refer to
+ for details.
+
+
+
+
+ If a firewall is running on the target host, ports need to be opened
+ to allow the migration. If you do not specify a port during the
+ migration process, &libvirt; chooses one from the range 49152:49215.
+ Make sure that either this range (recommended) or a dedicated port of
+ your choice is opened in the firewall on the target
+ host.
+
+
+
+
+ The source and target machines should be in the same subnet on the
+ network, otherwise networking fails after the migration.
+
+
+
+
+ All &vmhost;s participating in migration must have the same UID for
+ the qemu user and the same GIDs for the kvm, qemu and libvirt groups.
+
+
+
+
+ No running or paused &vmguest; with the same name must exist on the
+ target host. If a shut-down machine with the same name exists, its
+ configuration is overwritten.
+
+
+
+
+ All CPU models, except the host cpu model, are
+ supported when migrating &vmguest;s.
+
+
+
+
+ The disk device type is not
+ migratable.
+
+
+
+
+ File system pass-through feature is incompatible with migration.
+
+
+
+
+ The &vmhost; and &vmguest; need to have proper timekeeping installed.
+
+
+
+
+
+ No physical devices can be passed from host to guest. Live migration
+ is currently not supported when using devices with PCI pass-through or
+ . If live migration needs to be
+ supported, use software virtualization (paravirtualization or full
+ virtualization).
+
+
+
+
+ The cache mode setting is an important setting for migration.
+
+
+
+
+
+ Backward migration, for example, from &slsa; 15 SP2 to 15 SP1, is not
+ supported.
+
+
+
+
+ SUSE strives to support live migration of &vmguest;s from a &vmhost;
+ running a service pack under LTSS to a &vmhost; running a newer
+ service pack within the same &slsa; major version. For example,
+ &vmguest; migration from a &slsa; 12 SP2 host to a &slsa; 12 SP5 host.
+ SUSE only performs minimal testing of LTSS-to-newer migration
+ scenarios and recommends thorough on-site testing before attempting to
+ migrate critical &vmguest;s.
+
+
+
+
+ The image directory should be located at the same path on both hosts.
+
+
+
+
+ All hosts should be on the same level of microcode (especially the
+ Spectre microcode updates). This can be achieved by installing the
+ latest updates of &productname; on all hosts.
+
+
+
+
+
+ Live-migrating with &vmm;
+
+
+ When using the &vmm; to migrate &vmguest;s, it does not matter on which
+ machine it is started. You can start &vmm; on the source or the target
+ host or even on a third host. In the latter case, you need to be able to
+ open remote connections to both the target and the source host.
+
+
+
+
+
+ Start &vmm; and establish a connection to the target or the source
+ host. If the &vmm; was started neither on the target nor the source
+ host, connections to both hosts need to be opened.
+
+
+
+
+ Right-click the &vmguest; that you want to migrate and choose
+ Migrate. Make sure the guest is running or
+ paused—it is not possible to migrate guests that are shut down.
+
+
+ Increasing the speed of the migration
+
+ To increase the speed of the migration, pause the &vmguest;. This is
+ the equivalent of non-live migration described in
+ .
+
+
+
+
+
+ Choose a New Host for the &vmguest;. If the desired
+ target host does not show up, make sure that you are connected to the
+ host.
+
+
+ To change the default options for connecting to the remote host, under
+ Connection, set the Mode, and
+ the target host's Address (IP address or host name)
+ and Port. If you specify a Port,
+ you must also specify an Address.
+
+
+ Under Advanced options, choose whether the move
+ should be permanent (default) or temporary, using Temporary
+ move.
+
+
+ Additionally, there is the option Allow unsafe,
+ which allows migrating without disabling the cache of the &vmhost;.
+ This can speed up the migration but only works when the current
+ configuration allows for a consistent view of the &vmguest; storage
+ without using
+ cache="none"/O_DIRECT.
+
+
+ Bandwidth option
+
+ In recent versions of &vmm;, the option of setting a bandwidth for
+ the migration has been removed. To set a specific bandwidth, use
+ virsh instead.
+
+
+
+
+
+ To perform the migration, click Migrate.
+
+
+ When the migration is complete, the Migrate window
+ closes and the &vmguest; is now listed on the new host in the &vmm;
+ window. The original &vmguest; is still available on the source host
+ in the shut-down state.
+
+
+
+
+
+ Migrating with virsh
+
+
+ To migrate a &vmguest; with virsh
+ , you need to have direct or remote shell access
+ to the &vmhost;, because the command needs to be run on the host. The
+ migration command looks like this:
+
+
+&prompt.user;virsh migrate [OPTIONS] VM_ID_or_NAME CONNECTION_URI [--migrateuri tcp://REMOTE_HOST:PORT]
+
+
+ The most important options are listed below. See virsh help
+ migrate for a full list.
+
+
+
+
+
+
+
+ Does a live migration. If not specified, the guest is paused during
+ the migration (non-live migration).
+
+
+
+
+
+
+
+ Leaves the VM paused on the target host during live or non-live
+ migration.
+
+
+
+
+
+
+
+ Persists the migrated VM on the target host. Without this option,
+ the VM is not included in the list of domains reported by
+ virsh list --all when shut down.
+
+
+
+
+
+
+
+ When specified, the &vmguest; definition on the source host is
+ deleted after a successful migration. However, virtual disks
+ attached to this guest are not deleted.
+
+
+
+
+
+
+
+ Parallel migration can be used to increase migration data throughput
+ in cases where a single migration thread is not capable of
+ saturating the network link between source and target hosts. On
+ hosts with 40 GbE network interfaces, it may require four
+ migration threads to saturate the link. With parallel migration, the
+ time required to migrate large memory VMs can be reduced.
+
+
+
+
+
+
+ The following examples use &wsIVname; as the source system and &wsIname;
+ as the target system; the &vmguest;'s name is
+ opensuse131 with ID 37.
+
+
+
+
+ Non-live migration with default parameters
+
+&prompt.user;virsh migrate 37 qemu+ssh://&exampleuser_plain;@&wsIname;/system
+
+
+
+ Transient live migration with default parameters
+
+&prompt.user;virsh migrate --live opensuse131 qemu+ssh://&exampleuser_plain;@&wsIname;/system
+
+
+
+ Persistent live migration; delete VM definition on source
+
+&prompt.user;virsh migrate --live --persistent --undefinesource 37 \
+qemu+tls://&exampleuser_plain;@&wsIname;/system
+
+
+
+ Non-live migration using port 49152
+
+&prompt.user;virsh migrate opensuse131 qemu+ssh://&exampleuser_plain;@&wsIname;/system \
+--migrateuri tcp://@&wsIname;:49152
+
+
+
+ Live migration transferring all used storage
+
+&prompt.user;virsh migrate --live --persistent --copy-storage-all \
+opensuse156 qemu+ssh://&exampleuser_plain;@&wsIname;/system
+
+
+ When migrating VM's storage using the
+ option, the storage must be
+ placed in a &libvirt; storage pool. The target storage pool must
+ exist with an identical type and name as the source pool.
+
+
+ To obtain the XML representation of the source pool, use the
+ following command:
+
+&prompt.sudo;virsh pool-dumpxml EXAMPLE_POOL > EXAMPLE_POOL.xml
+
+ To create and start the storage pool on the target host, copy its
+ XML representation there and use the following commands:
+
+&prompt.sudo;virsh pool-define EXAMPLE_POOL.xml
+&prompt.sudo;virsh pool-start EXAMPLE_POOL
+
+
+
+
+
+
+ Transient compared to persistent migrations
+
+ By default, virsh migrate creates a temporary
+ (transient) copy of the &vmguest; on the target host. A shut-down
+ version of the original guest description remains on the source host. A
+ transient copy is deleted from the server after it is shut down.
+
+
+ To create a permanent copy of a guest on the target host, use the switch
+ . A shut-down version of the original guest
+ description remains on the source host, too. Use the option
+ together with
+ for a real move where a
+ permanent copy is created on the target host and the version on the
+ source host is deleted.
+
+
+ It is not recommended to use without
+ the option, since this results in the loss
+ of both &vmguest; definitions when the guest is shut down on the target
+ host.
+
+
+
+
+
+ Step-by-step example
+
+
+ Exporting the storage
+
+ First, you need to export the storage to share the guest image between
+ hosts. This can be done by an NFS server. In the following example, we
+ want to share the /volume1/VM directory for all
+ machines that are on the network 10.0.1.0/24. We are using a &sle; NFS
+ server. As root user, edit the /etc/exports file
+ and add:
+
+/volume1/VM 10.0.1.0/24(rw,sync,no_root_squash)
+
+ You need to restart the NFS server:
+
+&prompt.sudo;systemctl restart nfsserver
+&prompt.sudo;exportfs
+/volume1/VM 10.0.1.0/24
+
+
+
+ Defining the pool on the target hosts
+
+ On each host where you want to migrate the &vmguest;, the pool must be
+ defined to be able to access the volume (that contains the Guest image).
+ Our NFS server IP address is 10.0.1.99, its share is the
+ /volume1/VM directory, and we want to get it
+ mounted in the /var/lib/libvirt/images/VM
+ directory. The pool name is VM. To define this
+ pool, create a VM.xml file with the following
+ content:
+
+<pool type='netfs'>
+ <name>VM</name>
+ <source>
+ <host name='10.0.1.99'/>
+ <dir path='/volume1/VM'/>
+ <format type='auto'/>
+ </source>
+ <target>
+ <path>/var/lib/libvirt/images/VM</path>
+ <permissions>
+ <mode>0755</mode>
+ <owner>-1</owner>
+ <group>-1</group>
+ </permissions>
+ </target>
+ </pool>
+
+ Then load it into &libvirt; using the pool-define
+ command:
+
+&prompt.root;virsh pool-define VM.xml
+
+ An alternative way to define this pool is to use the
+ virsh command:
+
+&prompt.root;virsh pool-define-as VM --type netfs --source-host 10.0.1.99 \
+ --source-path /volume1/VM --target /var/lib/libvirt/images/VM
+Pool VM created
+
+ The following commands assume that you are in the interactive shell of
+ virsh, which can also be reached by using the command
+ virsh without any arguments. Then the pool can be set
+ to start automatically at host boot (autostart option):
+
+virsh # pool-autostart VM
+Pool VM marked as autostarted
+
+ To disable the autostart:
+
+virsh # pool-autostart VM --disable
+Pool VM unmarked as autostarted
+
+ Check if the pool is present:
+
+virsh # pool-list --all
+ Name State Autostart
+-------------------------------------------
+ default active yes
+ VM active yes
+
+virsh # pool-info VM
+Name: VM
+UUID: 42efe1b3-7eaa-4e24-a06a-ba7c9ee29741
+State: running
+Persistent: yes
+Autostart: yes
+Capacity: 2,68 TiB
+Allocation: 2,38 TiB
+Available: 306,05 GiB
+
+ Pool needs to exist on all target hosts
+
+ Remember: This pool must be defined on each host where you want to be
+ able to migrate your &vmguest;.
+
+
+
+
+
+ Creating the volume
+
+ The pool has been defined—now we need a volume that contains the
+ disk image:
+
+virsh # vol-create-as VM sled12.qcow2 8G --format qcow2
+Vol sled12.qcow2 created
+
+ The volume names shown are used later to install the guest with
+ virt-install.
+
+
+
+
+ Creating the &vmguest;
+
+ Let us create a &productname; &vmguest; with the
+ virt-install command. The VM
+ pool is specified with the --disk option,
+ cache=none is recommended if you do not want to use
+ the --unsafe option while doing the migration.
+
+&prompt.root;virt-install --connect qemu:///system --virt-type kvm --name \
+ sles15 --memory 1024 --disk vol=VM/sled12.qcow2,cache=none --cdrom \
+ /mnt/install/ISO/SLE-15-Server-DVD-x86_64-Build0327-Media1.iso --graphics \
+ vnc --os-variant sled15
+Starting install...
+Creating domain...
+
+
+
+ Migrating the &vmguest;
+
+ Everything is ready to do the migration now. Run the
+ migrate command on the &vmhost; that is currently
+ hosting the &vmguest;, and choose the target.
+
+virsh # migrate --live sled12 --verbose qemu+ssh://IP/Hostname/system
+Password:
+Migration: [ 12 %]
+
+
+
diff --git a/references/libvirt_overview.xml b/references/libvirt_overview.xml
new file mode 100644
index 000000000..650522ac8
--- /dev/null
+++ b/references/libvirt_overview.xml
@@ -0,0 +1,463 @@
+
+
+ %entities;
+]>
+
+ &libvirt; daemons
+
+
+
+ yes
+
+
+
+ A &libvirt; deployment for accessing &kvm; requires one or more
+ daemons to be installed and active on the host. &libvirt; provides two
+ daemon deployment options: monolithic or modular daemons. &libvirt; has
+ always provided the single monolithic daemon &libvirtd;. It includes the
+ primary hypervisor drivers and all secondary drivers needed for managing
+ storage, networking, node devices, etc. The monolithic &libvirtd; also
+ provides secure remote access for external clients. Over time, &libvirt;
+ added support for modular daemons, where each driver runs in its own
+ daemon, allowing users to customize their &libvirt; deployment. Modular
+ daemons are enabled by default, but a deployment can be switched to the
+ traditional monolithic daemon by disabling the individual daemons and
+ enabling &libvirtd;.
+
+
+ The modular daemon deployment is useful in scenarios where minimal
+ &libvirt; support is needed. For example, if virtual machine storage and
+ networking is not provided by &libvirt;, the
+ libvirt-daemon-driver-storage and
+ libvirt-daemon-driver-network packages are not
+ required. &kube; is an example of an extreme case, where it handles all
+ networking, storage, cgroups and namespace integration, etc. Only the
+ libvirt-daemon-driver-&qemu; package, providing
+ virtqemud, needs to be installed.
+ Modular daemons allow configuring a custom &libvirt; deployment containing
+ only the components required for the use case.
+
+
+
+ Starting and stopping the modular daemons
+
+
+ The modular daemons are named after the driver which they are running,
+ with the pattern virtDRIVERd.
+ They are configured via the files
+ /etc/libvirt/virtDRIVERd.conf.
+ &suse; supports the virtqemud
+ hypervisor daemons, along with all the secondary daemons:
+
+
+
+
+
+ virtnetworkd - The virtual network management
+ daemon which provides &libvirt;'s virtual network management APIs.
+ For example, virtnetworkd can be used to create a NAT virtual network
+ on the host for use by virtual machines.
+
+
+
+
+ virtnodedevd - The host physical device
+ management daemon which provides &libvirt;'s node device management
+ APIs. For example, virtnodedevd can be used to detach a PCI device
+ from the host for use by a virtual machine.
+
+
+
+
+ virtnwfilterd - The host firewall management
+ daemon which provides &libvirt;'s firewall management APIs. For
+ example, virtnwfilterd can be used to configure network traffic
+ filtering rules for virtual machines.
+
+
+
+
+ virtsecretd - The host secret management daemon
+ which provides &libvirt;'s secret management APIs. For example,
+ virtsecretd can be used to store a key associated with a LUKs volume.
+
+
+
+
+ virtstoraged - The host storage management
+ daemon which provides &libvirt;'s storage management APIs.
+ virtstoraged can be used to create storage pools and create volumes
+ from those pools.
+
+
+
+
+ virtinterfaced - The host NIC management daemon
+ which provides &libvirt;'s host network interface management APIs.
+ For example, virtinterfaced can be used to create a bonded network
+ device on the host. &suse; discourages the use of &libvirt;'s interface
+ management APIs in favor of default networking tools like wicked or
+ &nm;. It is recommended to disable virtinterfaced.
+
+
+
+
+ virtproxyd - A daemon to proxy connections
+ between the traditional &libvirtd; sockets and the modular daemon
+ sockets. With a modular &libvirt; deployment, virtproxyd allows
+ remote clients to access the &libvirt; APIs similar to the monolithic
+ &libvirtd;. It can also be used by local clients that connect to the
+ monolithic &libvirtd; sockets.
+
+
+
+
+ virtlogd - A daemon to manage logs from virtual
+ machine consoles. virtlogd is also used by the monolithic &libvirtd;.
+ The monolithic daemon and virtqemud &systemd; unit files require
+ virtlogd, so it is not necessary to explicitly start virtlogd.
+
+
+
+
+ virtlockd - A daemon to manage locks held
+ against virtual machine resources such as disks. virtlockd is also
+ used by the monolithic &libvirtd;. The monolithic daemon and virtqemud
+ &systemd; unit files require virtlockd, so it is not
+ necessary to explicitly start virtlockd.
+
+
+
+
+
+ virtlogd and
+ virtlockd are also used by the
+ monolithic &libvirtd;. These daemons have always been separate from
+ &libvirtd; for security reasons.
+
+
+
+ By default, the modular daemons listen for connections on the
+ /var/run/libvirt/virtDRIVERd-sock
+ and
+ /var/run/libvirt/virtDRIVERd-sock-ro
+ Unix Domain Sockets. The client library prefers these sockets over the
+ traditional /var/run/libvirt/libvirtd-sock. The
+ virtproxyd daemon is available for remote clients or local clients
+ expecting the traditional &libvirtd; socket.
+
+
+
+ The virtqemud,
+ virtnetworkd,
+ virtnodedevd,
+ virtnwfilterd,
+ virtstoraged and
+ virtsecretd are also enabled in
+ the presets, ensuring the daemons are enabled and available when the
+ corresponding packages are installed. Although enabled in presets for
+ convenience, the modular daemons can also be managed with their &systemd;
+ unit files:
+
+
+
+
+
+ virtDRIVERd.service -
+ The main unit file for launching the
+ virtDRIVERd daemon. We recommend
+ configuring the service to start on boot if VMs are also configured
+ to start on host boot.
+
+
+
+
+ virtDRIVERd.socket -
+ The unit file corresponding to the main read-write UNIX socket
+ /var/run/libvirt/virtDRIVERd-sock.
+ We recommend starting this socket on boot by default.
+
+
+
+
+ virtDRIVERd-ro.socket
+ - The unit file corresponding to the main read-only UNIX socket
+ /var/run/libvirt/virtDRIVERd-sock-ro.
+ We recommend starting this socket on boot by default.
+
+
+
+
+ virtDRIVERd-admin.socket
+ - The unit file corresponding to the administrative UNIX socket
+ /var/run/libvirt/virtDRIVERd-admin-sock.
+ We recommend starting this socket on boot by default.
+
+
+
+
+
+ When &systemd; socket activation is used, several configuration settings
+ in virtDRIVERd.conf are no longer honored.
+ Instead, these settings must be controlled via the system unit files:
+
+
+
+
+
+ unix_sock_group - UNIX socket group owner,
+ controlled via the parameter in the
+ virtDRIVERd.socket
+ and
+ virtDRIVERd-ro.socket
+ unit files.
+
+
+
+
+ unix_sock_ro_perms - Read-only UNIX socket
+ permissions, controlled via the parameter
+ in the
+ virtDRIVERd-ro.socket
+ unit file.
+
+
+
+
+ unix_sock_rw_perms - Read-write UNIX socket
+ permissions, controlled via the parameter
+ in the
+ virtDRIVERd.socket
+ unit file.
+
+
+
+
+ unix_sock_admin_perms - Admin UNIX socket
+ permissions, controlled via the parameter
+ in the
+ virtDRIVERd-admin.socket
+ unit file.
+
+
+
+
+ unix_sock_dir - Directory in which all UNIX
+ sockets are created, independently controlled via the
+ parameter in any of the
+ virtDRIVERd.socket,
+ virtDRIVERd-ro.socket
+ and
+ virtDRIVERd-admin.socket
+ unit files.
+
+
+
+
+
+ Starting and stopping the monolithic daemon
+
+
+ The monolithic daemon is known as &libvirtd; and is configured via
+ /etc/libvirt/libvirtd.conf. &libvirtd; is managed
+ with several &systemd; unit files:
+
+
+
+
+
+ libvirtd.service - The main &systemd; unit file
+ for launching &libvirtd;. We recommend configuring
+ libvirtd.service to start on boot if VMs are
+ also configured to start on host boot.
+
+
+
+
+ libvirtd.socket - The unit file corresponding to
+ the main read-write UNIX socket
+ /var/run/libvirt/libvirt-sock. We recommend
+ enabling this unit on boot.
+
+
+
+
+ libvirtd-ro.socket - The unit file corresponding
+ to the main read-only UNIX socket
+ /var/run/libvirt/libvirt-sock-ro. We recommend
+ enabling this unit on boot.
+
+
+
+
+ libvirtd-admin.socket - The unit file
+ corresponding to the administrative UNIX socket
+ /var/run/libvirt/libvirt-admin-sock. We
+ recommend enabling this unit on boot.
+
+
+
+
+ libvirtd-tcp.socket - The unit file
+ corresponding to the TCP 16509 port for non-TLS remote access. This
+ unit should not be configured to start on boot until the
+ administrator has configured a suitable authentication mechanism.
+
+
+
+
+ libvirtd-tls.socket - The unit file
+ corresponding to the TCP 16509 port for TLS remote access. This unit
+ should not be configured to start on boot until the administrator has
+ deployed x509 certificates and optionally configured a suitable
+ authentication mechanism.
+
+
+
+
+
+ When &systemd; socket activation is used, certain configuration settings
+ in libvirtd.conf are no longer honored. Instead,
+ these settings must be controlled via the system unit files:
+
+
+
+
+
+ listen_tcp - TCP socket usage is enabled by
+ starting the libvirtd-tcp.socket unit file.
+
+
+
+
+ listen_tls - TLS socket usage is enabled by
+ starting the libvirtd-tls.socket unit file.
+
+
+
+
+ tcp_port - Port for the non-TLS TCP socket,
+ controlled via the parameter in the
+ libvirtd-tcp.socket unit file.
+
+
+
+
+ tls_port - Port for the TLS TCP socket,
+ controlled via the parameter in the
+ libvirtd-tls.socket unit file.
+
+
+
+
+ listen_addr - IP address to listen on,
+ independently controlled via the
+ parameter in the libvirtd-tcp.socket or
+ libvirtd-tls.socket unit files.
+
+
+
+
+ unix_sock_group - UNIX socket group owner,
+ controlled via the parameter in the
+ libvirtd.socket and
+ libvirtd-ro.socket unit files.
+
+
+
+
+ unix_sock_ro_perms - Read-only UNIX socket
+ permissions, controlled via the parameter
+ in the libvirtd-ro.socket unit file.
+
+
+
+
+ unix_sock_rw_perms - Read-write UNIX socket
+ permissions, controlled via the parameter
+ in the libvirtd.socket unit file.
+
+
+
+
+ unix_sock_admin_perms - Admin UNIX socket
+ permissions, controlled via the parameter
+ in the libvirtd-admin.socket unit file.
+
+
+
+
+ unix_sock_dir - Directory in which all UNIX
+ sockets are created, independently controlled via the
+ parameter in any of the
+ libvirtd.socket,
+ libvirtd-ro.socket and
+ libvirtd-admin.socket unit files.
+
+
+
+
+
+
+ Switching to the monolithic daemon
+
+
+ Several services need to be changed when switching from modular to the
+ monolithic daemon. It is recommended to stop or evict any running virtual
+ machines before switching between the daemon options.
+
+
+
+
+
+ Stop the modular daemons and their sockets. The following example
+ disables the &qemu; daemon for &kvm; and several secondary daemons.
+
+
+for drv in qemu network nodedev nwfilter secret storage
+do
+ &prompt.sudo;systemctl stop virt${drv}d.service
+ &prompt.sudo;systemctl stop virt${drv}d{,-ro,-admin}.socket
+done
+
+
+
+
+ Disable future start of the modular daemons
+
+
+for drv in qemu network nodedev nwfilter secret storage
+do
+ &prompt.sudo;systemctl disable virt${drv}d.service
+ &prompt.sudo;systemctl disable virt${drv}d{,-ro,-admin}.socket
+done
+
+
+
+
+ Enable the monolithic &libvirtd; service and sockets
+
+
+&prompt.sudo;systemctl enable libvirtd.service
+&prompt.sudo;systemctl enable libvirtd{,-ro,-admin}.socket
+
+
+
+
+ Start the monolithic &libvirtd; sockets
+
+
+&prompt.sudo;systemctl start libvirtd{,-ro,-admin}.socket
+
+
+
+
+
diff --git a/references/libvirt_storage.xml b/references/libvirt_storage.xml
new file mode 100644
index 000000000..93d0849f8
--- /dev/null
+++ b/references/libvirt_storage.xml
@@ -0,0 +1,402 @@
+
+
+ %entities;
+]>
+
+
+ Advanced storage topics
+
+
+
+ yes
+
+
+
+ 2024-06-27
+
+
+
+
+
+
+
+ This chapter introduces advanced topics about manipulating storage from the
+ perspective of the &vmhost;.
+
+
+ Locking disk files and block devices with virtlockd
+
+
+ Locking block devices and disk files prevents concurrent writes to these
+ resources from different &vmguest;s. It provides protection against
+ starting the same &vmguest; twice, or adding the same disk to two
+ different virtual machines. This reduces the risk of a virtual machine's
+ disk image becoming corrupted because of a wrong configuration.
+
+
+
+ The locking is controlled by a daemon called
+ virtlockd. Since it operates
+ independently from the &libvirtd; daemon, locks endure a crash or a
+ restart of &libvirtd;. Locks even persist during an update of the
+ virtlockd itself, since it can
+ re-execute itself. This ensures that &vmguest;s do
+ not need to be restarted upon a
+ virtlockd update.
+ virtlockd is supported for
+ &kvm; and &qemu;.
+
+
+
+ Enable locking
+
+ Locking virtual disks is not enabled by default on &productname;. To
+ enable and automatically start it upon rebooting, perform the following
+ steps:
+
+
+
+
+ Edit /etc/libvirt/qemu.conf and set
+
+lock_manager = "lockd"
+
+
+
+ Start the virtlockd daemon
+ with the following command:
+
+&prompt.sudo;systemctl start virtlockd
+
+
+
+ Restart the &libvirtd; daemon with:
+
+&prompt.sudo;systemctl restart libvirtd
+
+
+
+ Make sure virtlockd is
+ automatically started when booting the system:
+
+&prompt.sudo;systemctl enable virtlockd
+
+
+
+
+
+ Configure locking
+
+ By default virtlockd is
+ configured to automatically lock all disks configured for your
+ &vmguest;s. The default setting uses a direct lockspace,
+ where the locks are acquired against the actual file paths associated
+ with the VM Guest <disk> devices. For example,
+ flock(2) is called directly on
+ /var/lib/libvirt/images/my-server/disk0.raw when
+ the &vmguest; contains the following <disk> device:
+
+<disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/var/lib/libvirt/images/my-server/disk0.raw'/>
+ <target dev='vda' bus='virtio'/>
+</disk>
+
+ The virtlockd configuration can
+ be changed by editing the file
+ /etc/libvirt/qemu-lockd.conf. It also contains
+ detailed comments with further information. Make sure to activate
+ configuration changes by reloading
+ virtlockd:
+
+&prompt.sudo;systemctl reload virtlockd
+
+
+ Enabling an indirect lockspace
+
+ The default configuration of
+ virtlockd uses a
+ direct lockspace. This means that the locks are
+ acquired against the actual file paths associated with the
+ <disk> devices.
+
+
+ If the disk file paths are not accessible to all hosts,
+ virtlockd can be configured
+ to allow an indirect lockspace. This means that a hash
+ of the disk image path is used to create a file in the indirect
+ lockspace directory. The locks are then held on these hash files
+ instead of the actual disk file paths. Indirect lockspace is also
+ useful if the file system containing the disk files does not support
+ fcntl() locks. An indirect lockspace is specified
+ with the setting:
+
+file_lockspace_dir = "/MY_LOCKSPACE_DIRECTORY"
+
+
+ Enable locking on LVM or iSCSI volumes
+
+ When wanting to lock virtual disks placed on LVM or iSCSI volumes
+ shared by several hosts, locking needs to be done by UUID rather than
+ by path (which is used by default). Furthermore, the lockspace
+ directory needs to be placed on a shared file system accessible by
+ all hosts sharing the volume. Set the following options for LVM
+ and/or iSCSI:
+
+lvm_lockspace_dir = "/MY_LOCKSPACE_DIRECTORY"
+iscsi_lockspace_dir = "/MY_LOCKSPACE_DIRECTORY"
+
+
+
+
+ Online resizing of guest block devices
+
+
+ Sometimes you need to change—extend or shrink—the size of the
+ block device used by your guest system. For example, when the disk space
+ originally allocated is no longer enough, it is time to increase its
+ size. If the guest disk resides on a logical volume,
+ you can resize it while the guest system is running. This is a big
+ advantage over an offline disk resizing as the service provided by
+ the guest is not interrupted by the resizing process. To resize a
+ &vmguest; disk, follow these steps:
+
+
+
+ Online resizing of guest disk
+
+
+ Inside the guest system, check the current size of the disk (for
+ example /dev/vda).
+
+&prompt.root;fdisk -l /dev/vda
+Disk /dev/vda: 160.0 GB, 160041885696 bytes, 312581808 sectors
+Units = sectors of 1 * 512 = 512 bytes
+Sector size (logical/physical): 512 bytes / 512 bytes
+I/O size (minimum/optimal): 512 bytes / 512 bytes
+
+
+
+ On the host, resize the logical volume holding the
+ /dev/vda disk of the guest to the required size,
+ for example, 200 GB.
+
+&prompt.root;lvresize -L 200G /dev/mapper/vg00-home
+Extending logical volume home to 200 GiB
+Logical volume home successfully resized
+
+
+
+ On the host, resize the block device related to the disk
+ /dev/mapper/vg00-home of the guest. You can find
+ the DOMAIN_ID with virsh
+ list.
+
+&prompt.root;virsh blockresize --path /dev/vg00/home --size 200G DOMAIN_ID
+Block device '/dev/vg00/home' is resized
+
+
+
+ Check that the new disk size is accepted by the guest.
+
+&prompt.root;fdisk -l /dev/vda
+Disk /dev/vda: 200.0 GB, 200052357120 bytes, 390727260 sectors
+Units = sectors of 1 * 512 = 512 bytes
+Sector size (logical/physical): 512 bytes / 512 bytes
+I/O size (minimum/optimal): 512 bytes / 512 bytes
+
+
+
+
+ Sharing directories between host and guests (file system pass-through)
+
+
+ &libvirt; allows you to share directories between host and guests using
+ &qemu;'s file system pass-through (also called VirtFS) feature. Such a
+ directory can also be accessed by several &vmguest;s at once and
+ therefore be used to exchange files between &vmguest;s.
+
+
+
+ Windows guests and file system pass-through
+
+ Sharing directories between &vmhost; and Windows guests via file system
+ pass-through does not work, because Windows lacks the drivers required
+ to mount the shared directory.
+
+
+
+
+ To make a shared directory available on a &vmguest;, proceed as follows:
+
+
+
+
+
+ Open the guest's console in &vmm; and either choose
+ View
+ Details from the menu or click
+ Show virtual hardware details in the toolbar.
+ Choose Add Hardware
+ Filesystem to open the
+ Filesystem Passthrough dialog.
+
+
+
+
+ Driver allows you to choose between a
+ Handle or Path base driver. The
+ default setting is Path. Mode
+ lets you choose the security model, which influences the way file
+ permissions are set on the host. Three options are available:
+
+
+
+ Passthrough (default)
+
+
+ Files on the file system are directly created with the
+ client-user's credentials. This is similar to what NFSv3 is
+ using.
+
+
+
+
+ Squash
+
+
+ Same as Passthrough, but failure of
+ privileged operations like chown are
+ ignored. This is required when &kvm; is not run with
+ root
+ privileges.
+
+
+
+
+ Mapped
+
+
+ Files are created with the file server's credentials
+ (qemu.qemu). The user credentials and the
+ client-user's credentials are saved in extended attributes.
+ This model is recommended when host and guest domains should be
+ kept isolated.
+
+
+
+
+
+
+
+ Specify the path to the directory on the &vmhost; with
+ Source Path. Enter a string at Target
+ Path to be used as a tag to mount the shared directory. The
+ string of this field is a tag only, not a path on the &vmguest;.
+
+
+
+
+ Apply the setting. If the &vmguest; is currently
+ running, you need to shut it down to apply the new setting (rebooting
+ the guest is not sufficient).
+
+
+
+
+ Boot the &vmguest;. To mount the shared directory, enter the
+ following command:
+
+&prompt.sudo;mount -t 9p -o trans=virtio,version=9p2000.L,rw TAG /MOUNT_POINT
+
+ To make the shared directory permanently available, add the following
+ line to the /etc/fstab file:
+
+TAG /MOUNT_POINT 9p trans=virtio,version=9p2000.L,rw 0 0
+
+
+
+
+ Using RADOS block devices with &libvirt;
+
+
+ RADOS Block Devices (RBD) store data in a &ceph; cluster. They allow
+ snapshotting, replication and data consistency. You can use an RBD from
+ your &libvirt;-managed &vmguest;s similarly to how you use other block
+ devices.
+
+
+
+ For more details, refer to the &ses; &admin;,
+ chapter Using libvirt with Ceph. The &ses;
+ documentation is available from
+ .
+
+
+
diff --git a/references/qemu_guest_installation.xml b/references/qemu_guest_installation.xml
new file mode 100644
index 000000000..466e2e246
--- /dev/null
+++ b/references/qemu_guest_installation.xml
@@ -0,0 +1,1059 @@
+
+
+ %entities;
+]>
+
+
+ Guest installation
+
+
+
+ yes
+
+
+
+ 2025-12-02
+
+
+
+
+
+
+
+ The libvirt-based tools such as
+ virt-manager and virt-install offer
+ convenient interfaces to set up and manage virtual machines. They act as a
+ kind of wrapper for the qemu-system-ARCH command.
+ However, it is also possible to use qemu-system-ARCH
+ directly without using
+ libvirt-based tools.
+
+
+ qemu-system-ARCH and libvirt
+
+ &vmguest;s created with
+ qemu-system-ARCH are not visible to the
+ &libvirt;-based tools.
+
+
+
+ Basic installation with qemu-system-ARCH
+
+
+ In the following example, a virtual machine for a &sls; 11 installation
+ is created. For detailed information on the commands, refer to the
+ respective man pages.
+
+
+
+ If you do not already have an image of a system that you want to run in a
+ virtualized environment, you need to create one from the installation
+ media. In such a case, you need to prepare a hard disk image, and obtain an
+ image of the installation media or the media itself.
+
+
+
+ Create a hard disk with qemu-img.
+
+
+&prompt.user;qemu-img create -f raw /images/sles/hda 8G
+
+
+
+
+ The create subcommand tells
+ qemu-img to create a new image.
+
+
+
+
+ Specify the disk's format with the -f parameter.
+
+
+
+
+ The full path to the image file.
+
+
+
+
+ The size of the image, 8 GB in this case. The image is created
+ as a sparse file that grows when
+ the disk is filled with data. The specified size defines the maximum
+ size to which the image file can grow.
+
+
+
+
+
+ After at least one hard disk image is created, you can set up a virtual
+ machine with qemu-system-ARCH that boots into the
+ installation system:
+
+
+&prompt.root;qemu-system-x86_64 -name "sles" -machine accel=kvm -M pc -m 768 \
+-smp 2 -boot d \
+-drive file=/images/sles/hda,if=virtio,index=0,media=disk,format=raw \
+-drive file=/isos/&installmedia;,index=1,media=cdrom \
+-net nic,model=virtio,macaddr=52:54:00:05:11:11 -net user \
+-vga cirrus -balloon virtio
+
+
+
+
+ Name of the virtual machine that is displayed in the window caption
+ and can be used for the VNC server. This name must be unique.
+
+
+
+
+ Specifies the machine type. Use qemu-system-ARCH
+ to display a list of valid parameters.
+ pc is the default Standard PC.
+
+
+
+
+ Maximum amount of memory for the virtual machine.
+
+
+
+
+ Defines an SMP system with two processors.
+
+
+
+
+ Specifies the boot order. Valid values are a,
+ b (floppy 1 and 2), c (first
+ hard disk), d (first CD-ROM), or n to
+ p (Ether-boot from network adapter 1-3). Defaults
+ to c.
+
+
+
+
+ Defines the first (index=0) hard disk. It is
+ accessed as a paravirtualized (if=virtio) drive in
+ raw format.
+
+
+
+
+ The second (index=1) image drive acts as a CD-ROM.
+
+
+
+
+ Defines a paravirtualized (model=virtio) network
+ adapter with the MAC address 52:54:00:05:11:11. Be
+ sure to specify a unique MAC address; otherwise, a network conflict
+ may occur.
+
+
+
+
+ Specifies the graphic card. If you specify none,
+ the graphic card is disabled.
+
+
+
+
+ Defines the paravirtualized balloon device that allows dynamically
+ changing the amount of memory (up to the maximum value specified with
+ the -m parameter).
+
+
+
+
+
+ After the installation of the guest operating system finishes, you can
+ start the related virtual machine without the need to specify the CD-ROM
+ device:
+
+
+&prompt.root;qemu-system-x86_64 -name "sles" -machine type=pc,accel=kvm -m 768 \
+-smp 2 -boot c \
+-drive file=/images/sles/hda,if=virtio,index=0,media=disk,format=raw \
+-net nic,model=virtio,macaddr=52:54:00:05:11:11 \
+-vga cirrus -balloon virtio
+
+
+
+ Managing disk images with qemu-img
+
+
+ In the previous section (see
+ ), we used the
+ qemu-img command to create an image of a hard disk.
+ You can, however, use qemu-img for general disk image
+ manipulation. This section introduces qemu-img
+ subcommands to help manage disk images flexibly.
+
+
+
+ General information on qemu-img invocation
+
+ qemu-img uses subcommands (like
+ zypper does) to do specific tasks. Each subcommand
+ understands a different set of options. Certain options are general and
+ used by more of these subcommands, while others are unique to the
+ related subcommand. See the qemu-img man page (man 1
+ qemu-img) for a list of all supported options.
+ qemu-img uses the following general syntax:
+
+&prompt.user;qemu-img subcommand [options]
+
+ and supports the following subcommands:
+
+
+
+ create
+
+
+ Creates a new disk image on the file system.
+
+
+
+
+ check
+
+
+ Checks an existing disk image for errors.
+
+
+
+
+ compare
+
+
+ Checks whether two images have the same content.
+
+
+
+
+ map
+
+
+ Dumps the metadata of the image file name and its backing file
+ chain.
+
+
+
+
+ amend
+
+
+ Modifies the options specific to the image format for the image file.
+
+
+
+
+ convert
+
+
+ Converts an existing disk image to a new one in a different
+ format.
+
+
+
+
+ info
+
+
+ Displays information about the relevant disk image.
+
+
+
+
+ snapshot
+
+
+ Manages snapshots of existing disk images.
+
+
+
+
+ commit
+
+
+ Applies changes made to an existing disk image.
+
+
+
+
+ rebase
+
+
+ Creates a new base image based on an existing image.
+
+
+
+
+ resize
+
+
+ Increases or decreases the size of an existing image.
+
+
+
+
+
+
+
+ Creating, converting, and checking disk images
+
+ This section describes how to create disk images, check their
+ condition, convert a disk image from one format to another, and get
+ detailed information about a particular disk image.
+
+
+ qemu-img create
+
+ Use qemu-img create to create a new disk image for
+ your &vmguest; operating system. The command uses the following
+ syntax:
+
+&prompt.user;qemu-img create -f fmt -o options fname size
+
+
+
+ The format of the target image. Supported formats are
+ raw and qcow2.
+
+
+
+
+ Certain image formats support additional options to be passed on the
+ command line. You can specify them here with the
+ -o option. The raw image
+ format supports only the size option, so it is
+ possible to insert -o size=8G instead of
+ adding the size option at the end of the command.
+
+
+
+
+ Path to the target disk image to be created.
+
+
+
+
+ Size of the target disk image (if not already specified with the
+ -o size=<image_size> option). Optional
+ suffixes for the image size are K (kilobyte),
+ M (megabyte), G (gigabyte),
+ or T (terabyte).
+
+
+
+
+ To create a new disk image sles.raw in the
+ directory /images growing up to a maximum size
+ of 4 GB, run the following command:
+
+&prompt.user;qemu-img create -f raw -o size=4G /images/sles.raw
+Formatting '/images/sles.raw', fmt=raw size=4294967296
+
+&prompt.user;ls -l /images/sles.raw
+-rw-r--r-- 1 tux users 4294967296 Nov 15 15:56 /images/sles.raw
+
+&prompt.user;qemu-img info /images/sles.raw
+image: /images/sles.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 0
+
+
+ As you can see, the virtual size of the newly
+ created image is 4 GB, but the actual reported disk size is 0, as no
+ data has been written to the image yet.
+
+
+ &vmguest; images on the Btrfs file system
+
+ If you need to create a disk image on the Btrfs file system, you
+ can use the nocow option to reduce the performance
+ overhead created by the copy-on-write feature of Btrfs:
+
+&prompt.user;qemu-img create -o nocow=on test.img 8G
+
+ If you, however, want to use copy-on-write, for example, for
+ creating snapshots or sharing them across virtual machines, then
+ leave the command line without the nocow option.
+
+
+
+
+ qemu-img convert
+
+ Use qemu-img convert to convert disk images to
+ another format. To get a complete list of image formats supported by
+ &qemu;, run qemu-img and look
+ at the last line of the output. The command uses the following
+ syntax:
+
+&prompt.user;qemu-img convert -c -f fmt -O out_fmt -o options fname out_fname
+
+
+
+ Applies compression to the target disk image. Only
+ qcow and qcow2 formats
+ support compression.
+
+
+
+
+ The format of the source disk image. It is normally autodetected
+ and can therefore be omitted.
+
+
+
+
+ The format of the target disk image.
+
+
+
+
+ Specify additional options relevant to the target image format.
+ Use -o ? to view the list of options supported
+ by the target image format.
+
+
+
+
+ Path to the source disk image to be converted.
+
+
+
+
+ Path to the converted target disk image.
+
+
+
+&prompt.user;qemu-img convert -O vmdk /images/sles.raw \
+/images/sles.vmdk
+
+&prompt.user;ls -l /images/
+-rw-r--r-- 1 tux users 4294967296 Nov 16 10:50 sles.raw
+-rw-r--r-- 1 tux users 2574450688 Nov 16 14:18 sles.vmdk
+
+
+ To see a list of options relevant for the selected target image
+ format, run the following command (replace vmdk
+ with your image format):
+
+&prompt.user;qemu-img convert -O vmdk /images/sles.raw /images/sles.vmdk -o ?
+Supported options:
+size Virtual disk size
+backing_file File name of a base image
+compat6 VMDK version 6 image
+subformat VMDK flat extent format, can be one of {monolithicSparse \
+ (default) | monolithicFlat | twoGbMaxExtentSparse | twoGbMaxExtentFlat}
+scsi SCSI image
+
+
+
+ qemu-img check
+
+ Use qemu-img check to check the existing disk
+ image for errors. Not all disk image formats support this feature.
+ The command uses the following syntax:
+
+&prompt.user;qemu-img check -f fmt fname
+
+
+
+ The format of the source disk image. It is normally autodetected
+ and can therefore be omitted.
+
+
+
+
+ Path to the source disk image to be checked.
+
+
+
+
+ If no error is found, the command returns no output. Otherwise, the
+ type and number of errors found are shown.
+
+&prompt.user;qemu-img check -f qcow2 /images/sles.qcow2
+ERROR: invalid cluster offset=0x2af0000
+[...]
+ERROR: invalid cluster offset=0x34ab0000
+378 errors were found on the image.
+
+
+
+ Increasing the size of an existing disk image
+
+ When creating a new image, you must specify its maximum size before
+ the image is created (see
+ ). After
+ you have installed the &vmguest; and have been using it for a certain
+ time, the initial size of the image may no longer be sufficient. In
+ that case, add more space to it.
+
+
+ To increase the size of an existing disk image by 2 gigabytes, use:
+
+&prompt.user;qemu-img resize /images/sles.raw +2GB
+
+
+ You can resize the disk image using the formats
+ raw and qcow2. To resize an
+ image in another format, convert it to a supported format with
+ qemu-img convert first.
+
+
+
+ The image now contains an empty space of 2 GB after the final
+ partition. You can resize existing partitions or add new ones.
+
+
+
+ Advanced options for the qcow2 file format
+
+ qcow2 is the main disk image format used by
+ &qemu;. Its size grows on demand, and disk space is only
+ allocated when it is needed by the virtual machine.
+
+
+ A qcow2-formatted file is organized in units of constant size. These
+ units are called clusters. Viewed from the guest
+ side, the virtual disk is also divided into clusters of the same
+ size. &qemu; defaults to 64 kB clusters, but you can specify a
+ different value when creating a new image:
+
+&prompt.user;qemu-img create -f qcow2 -o cluster_size=128K virt_disk.qcow2 4G
+
+ A qcow2 image contains a set of tables organized in two levels that
+ are called the L1 and L2 tables. There is just one L1 table per disk
+ image, while there can be many L2 tables depending on how big the
+ image is.
+
+
+ To read or write data to the virtual disk, &qemu; needs to read its
+ corresponding L2 table to find out the relevant data location.
+ Because reading the table for each I/O operation consumes system
+ resources, &qemu; keeps a cache of L2 tables in memory to speed up
+ disk access.
+
+
+ Choosing the right cache size
+
+ The cache size relates to the amount of allocated space. L2 cache
+ can map the following amount of virtual disk:
+
+disk_size = l2_cache_size * cluster_size / 8
+
+ With the default 64 kB of cluster size, that is
+
+disk_size = l2_cache_size * 8192
+
+ Therefore, to have a cache that maps n gigabytes
+ of disk space with the default cluster size, you need
+
+l2_cache_size = disk_size_GB * 131072
+
+ &qemu; uses 1 MB (1048576 bytes) of L2 cache by default.
+ Following the above formulas, 1 MB of L2 cache covers
+ 8 GB (1048576 / 131072) of virtual disk. This means that the
+ performance is fine with the default L2 cache size if your virtual
+ disk size is up to 8 GB. For larger disks, you can speed up
+ disk access by increasing the L2 cache size.
+
+
+
+ Configuring the cache size
+
+ You can use the option on the &qemu;
+ command line to specify the cache size. Alternatively, when
+ communicating via QMP, use the blockdev-add
+ command. For more information on QMP, see
+ .
+
+
+ The following options configure the cache size for the virtual
+ guest:
+
+
+
+ l2-cache-size
+
+
+ The maximum size of the L2 table cache.
+
+
+
+
+ refcount-cache-size
+
+
+ The maximum size of the refcount block
+ cache. For more information on refcount,
+ see
+ .
+
+
+
+
+ cache-size
+
+
+ The maximum size of both caches combined.
+
+
+
+
+
+ When specifying values for the options above, be aware of the
+ following:
+
+
+
+
+ The size of both the L2 and refcount block caches needs to be a
+ multiple of the cluster size.
+
+
+
+
+ If you only set one option, &qemu; automatically
+ adjusts the other options so that the L2 cache is 4 times
+ bigger than the refcount cache.
+
+
+
+
+ The refcount cache is used much less often than the L2 cache;
+ therefore, you can keep it small:
+
+&prompt.root;qemu-system-ARCH [...] \
+ -drive file=disk_image.qcow2,l2-cache-size=4194304,refcount-cache-size=262144
+
+
+ Reducing memory usage
+
+ The larger the cache, the more memory it consumes. There is a
+ separate L2 cache for each qcow2 file. When using a lot of big disk
+ images, you may need a considerably large amount of memory. Memory
+ consumption is even worse if you add backing files
+ () and
+ snapshots (see
+ ) to the
+ guest's setup chain.
+
+
+ This is why &qemu; introduced the
+ setting. It defines an
+ interval in seconds after which all cache entries that have not
+ been accessed are removed from memory.
+
+
+ The following example removes all unused cache entries every 10
+ minutes:
+
+&prompt.root;qemu-system-ARCH [...] -drive file=hd.qcow2,cache-clean-interval=600
+
+ If this option is not set, the default value is 0, and it disables
+ this feature.
+
+
+
+
+
+
+ Managing snapshots of virtual machines with qemu-img
+
+ snapshots are snapshots of the complete
+ environment in which a &vmguest; is running. The snapshot includes the
+ state of the processor (CPU), memory (RAM), devices, and all writable
+ disks.
+
+
+ Snapshots are helpful when you need to save your virtual machine in a
+ particular state. For example, after you have configured network services on
+ a virtualized server and want to quickly start the virtual machine in
+ the same state you last saved it. Or you can create a snapshot after
+ the virtual machine has been powered off to create a backup state
+ before you try something experimental and make the &vmguest; unstable. This
+ section introduces the latter case, while the former is described in
+ .
+
+
+ To use snapshots, your &vmguest; must contain at least one writable
+ hard disk image in qcow2 format. This device is
+ normally the first virtual hard disk.
+
+
+ snapshots are created with the
+ savevm command in the interactive &qemu; monitor. To
+ make identifying a particular snapshot easier, you can assign it a
+ tag. For more information on the &qemu; monitor, see
+ .
+
+
+ Once your qcow2 disk image contains saved snapshots,
+ you can inspect them with the qemu-img snapshot
+ command.
+
+
+ Shut down the &vmguest;
+
+ Do not create or delete virtual machine snapshots with the
+ qemu-img snapshot command while the virtual
+ machine is running. Otherwise, you may corrupt the disk image and the
+ saved state of the virtual machine.
+
+
+
+ Listing existing snapshots
+
+ Use qemu-img snapshot -l
+ DISK_IMAGE to view a list of all existing
+ snapshots saved in the disk_image image. You can
+ get the list even while the &vmguest; is running.
+
+&prompt.user;qemu-img snapshot -l /images/sles.qcow2
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+
+
+
+ Unique auto-incremented identification number of the snapshot.
+
+
+
+
+ Unique description string of the snapshot. It is meant as a
+ human-readable version of the ID.
+
+
+
+
+ The disk space occupied by the snapshot. The more
+ memory is consumed by running applications, the bigger the
+ snapshot is.
+
+
+
+
+ Time and date the snapshot was created.
+
+
+
+
+ The current state of the virtual machine's clock.
+
+
+
+
+
+ Creating snapshots of a powered-off virtual machine
+
+ Use qemu-img snapshot -c
+ SNAPSHOT_TITLE
+ DISK_IMAGE to create a snapshot of the
+ current state of a virtual machine that was previously powered off.
+
+&prompt.user;qemu-img snapshot -c backup_snapshot /images/sles.qcow2
+&prompt.user;qemu-img snapshot -l /images/sles.qcow2
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+5 backup_snapshot 0 2013-11-22 14:14:00 00:00:00.000
+
+ If something breaks in your &vmguest; and you need to restore the
+ state of the saved snapshot (ID 5 in our example), power off your
+ &vmguest; and execute the following command:
+
+&prompt.user;qemu-img snapshot -a 5 /images/sles.qcow2
+
+ The next time you run the virtual machine with
+ qemu-system-ARCH, it will be in the state of
+ snapshot number 5.
+
+
+
+ The qemu-img snapshot -c command is not related
+ to the savevm command of &qemu; monitor (see
+ ). For example, you cannot apply
+ a snapshot with qemu-img snapshot -a on a
+ snapshot created with savevm in &qemu;'s
+ monitor.
+
+
+
+
+ Deleting snapshots
+
+ Use qemu-img snapshot -d
+ SNAPSHOT_ID
+ DISK_IMAGE to delete old or unneeded
+ snapshots of a virtual machine. This saves disk space inside the
+ qcow2 disk image, as the space occupied by the
+ snapshot data is restored:
+
+&prompt.user;qemu-img snapshot -d 2 /images/sles.qcow2
+
+
+
+
+ Manipulate disk images effectively
+
+ Imagine the following real-life situation: you are a server
+ administrator who runs and manages several virtualized operating
+ systems. One group of these systems is based on one specific
+ distribution, while another group (or groups) is based on different
+ versions of the distribution or even on a different (and maybe
+ non-Unix) platform. To make the case even more complex, individual
+ virtual guest systems based on the same distribution differ according
+ to the department and deployment. A file server typically uses a
+ different setup and services than a Web server does, while both may
+ still be based on &slsreg;.
+
+
+ With &qemu; it is possible to create base disk images.
+ You can use them as template virtual machines. These base images save
+ you plenty of time because you do not need to install the same
+ operating system more than once.
+
+
+ Base and derived images
+
+ First, build a disk image as usual and install the target system on
+ it. For more information, see
+ and
+ . Then build a
+ new image while using the first one as a base image. The base image
+ is also called a backing file. After your new
+ derived image is built, never boot the base
+ image again, but boot the derived image instead. Several derived
+ images may depend on one base image at the same time. Therefore,
+ changing the base image can damage the dependencies. While using your
+ derived image, &qemu; writes changes to it and uses the base image
+ only for reading.
+
+
+ It is a good practice to create a base image from a freshly installed
+ (and, if needed, registered) operating system with no patches applied
+ and no additional applications installed or removed. Later on, you
+ can create another base image with the latest patches applied and
+ based on the original base image.
+
+
+
+ Creating derived images
+
+
+ While you can use the raw format for base
+ images, you cannot use it for derived images because the
+ raw format does not support the
+ backing_file option. Use, for example, the
+ qcow2 format for the derived images.
+
+
+
+ For example, /images/sles_base.raw is the base
+ image holding a freshly installed system.
+
+&prompt.user;qemu-img info /images/sles_base.raw
+image: /images/sles_base.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 2.4G
+
+ The image's reserved size is 4 GB, the actual size is 2.4 GB, and its
+ format is raw. Create an image derived from the
+ /images/sles_base.raw base image with:
+
+&prompt.user;qemu-img create -f qcow2 /images/sles_derived.qcow2 \
+-o backing_file=/images/sles_base.raw
+Formatting '/images/sles_derived.qcow2', fmt=qcow2 size=4294967296 \
+backing_file='/images/sles_base.raw' encryption=off cluster_size=0
+
+
+ Look at the derived image details:
+
+&prompt.user;qemu-img info /images/sles_derived.qcow2
+image: /images/sles_derived.qcow2
+file format: qcow2
+virtual size: 4.0G (4294967296 bytes)
+disk size: 140K
+cluster_size: 65536
+backing file: /images/sles_base.raw \
+(actual path: /images/sles_base.raw)
+
+ Although the reserved size of the derived image is the same as the
+ size of the base image (4 GB), the actual size is only 140 KB. The
+ reason is that only changes made to the system inside the derived
+ image are saved. Run the derived virtual machine, register it, if
+ needed, and apply the latest patches. Do any other changes in the
+ system, such as removing unneeded or installing new software packages.
+ Then shut the &vmguest; down and examine its details once more:
+
+&prompt.user;qemu-img info /images/sles_derived.qcow2
+image: /images/sles_derived.qcow2
+file format: qcow2
+virtual size: 4.0G (4294967296 bytes)
+disk size: 1.1G
+cluster_size: 65536
+backing file: /images/sles_base.raw \
+(actual path: /images/sles_base.raw)
+
+ The disk size value has grown to 1.1 GB, which is
+ the disk space occupied by the changes on the file system compared to
+ the base image.
+
+
+
+ Rebasing derived images
+
+ After you have modified the derived image (applied patches, installed
+ specific applications, changed environment settings, etc.), it
+ reaches the desired state. At that point, you can merge the original
+ base image and the derived image to create a new base image.
+
+
+ Your original base image (/images/sles_base.raw)
+ holds a freshly installed system. It can be a template for new
+ modified base images, while the new one can contain the same system
+ as the first one plus all security and update patches applied, for
+ example. After you have created this new base image, you can use it
+ as a template for more specialized derived images as well. The new
+ base image becomes independent of the original one. The process of
+ creating base images from derived ones is called
+ rebasing:
+
+&prompt.user;qemu-img convert /images/sles_derived.qcow2 \
+-O raw /images/sles_base2.raw
+
+ This command created the new base image
+ /images/sles_base2.raw using the
+ raw format.
+
+&prompt.user;qemu-img info /images/sles_base2.raw
+image: /images/sles_base2.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 2.8G
+
+ The new image is 0.4 gigabytes larger than the original base image.
+ It uses no backing file, and you can easily create new derived images
+ based on it. This lets you create a sophisticated hierarchy of
+ virtual disk images for your organization, saving a lot of time and
+ work.
+
+
+
+ Mounting an image on a &vmhost;
+
+ It can be useful to mount a virtual disk image on the host system.
+
+
+
+ Linux systems can mount an internal partition of a
+ raw disk image using a loopback device. The first
+ example procedure is more complex but more illustrative, while the
+ second one is straightforward:
+
+
+ Mounting a disk image by calculating the partition offset
+
+
+ Set a loop device on the disk image whose
+ partition you want to mount.
+
+&prompt.user;losetup /dev/loop0 /images/sles_base.raw
+
+
+
+ Find the sector size and the starting
+ sector number of the partition you want to
+ mount.
+
+&prompt.user;fdisk -lu /dev/loop0
+
+Disk /dev/loop0: 4294 MB, 4294967296 bytes
+255 heads, 63 sectors/track, 522 cylinders, total 8388608 sectors
+Units = sectors of 1 * 512 = 512 bytes
+Disk identifier: 0x000ceca8
+
+ Device Boot Start End Blocks Id System
+/dev/loop0p1 63 1542239 771088+ 82 Linux swap
+/dev/loop0p2 * 1542240 8385929 3421845 83 Linux
+
+
+
+ The disk sector size.
+
+
+
+
+ The starting sector of the partition.
+
+
+
+
+
+
+ Calculate the partition start offset:
+
+
+ sector_size * sector_start = 512 * 1542240 = 789626880
+
+
+
+
+
+ Delete the loop and mount the partition inside the disk image
+ with the calculated offset on a prepared directory.
+
+&prompt.user;losetup -d /dev/loop0
+&prompt.user;mount -o loop,offset=789626880 \
+/images/sles_base.raw /mnt/sles/
+&prompt.user;ls -l /mnt/sles/
+total 112
+drwxr-xr-x 2 root root 4096 Nov 16 10:02 bin
+drwxr-xr-x 3 root root 4096 Nov 16 10:27 boot
+drwxr-xr-x 5 root root 4096 Nov 16 09:11 dev
+[...]
+drwxrwxrwt 14 root root 4096 Nov 24 09:50 tmp
+drwxr-xr-x 12 root root 4096 Nov 16 09:16 usr
+drwxr-xr-x 15 root root 4096 Nov 16 09:22 var
+
+
+
+
+ Copy one or more files onto the mounted partition and unmount it
+ when finished.
+
+&prompt.user;cp /etc/X11/xorg.conf /mnt/sles/root/tmp
+&prompt.user;ls -l /mnt/sles/root/tmp
+&prompt.user;umount /mnt/sles/
+
+
+
+ Do not write to images that are currently in use
+
+ Never mount a partition of an image of a running virtual machine in
+ read-write mode. This could corrupt the
+ partition and break the whole &vmguest;.
+
+
+
+
+
+
diff --git a/references/qemu_host_installation.xml b/references/qemu_host_installation.xml
new file mode 100644
index 000000000..9c6eb0704
--- /dev/null
+++ b/references/qemu_host_installation.xml
@@ -0,0 +1,672 @@
+
+
+ %entities;
+]>
+
+
+ Setting up a &kvm; &vmhost;
+
+
+
+ yes
+
+
+
+ This section documents how to set up and use &productname; &productnumber;
+ as a &qemu;-&kvm; based virtual machine host.
+
+
+ Resources
+
+ The virtual guest system needs the same hardware resources as if it were
+ installed on a physical machine. The more guests you plan to run on the
+ host system, the more hardware resources—CPU, disk, memory and
+ network—you need to add to the &vmhost;.
+
+
+
+ CPU support for virtualization
+
+
+ To run &kvm;, your CPU must support virtualization, and virtualization
+ needs to be enabled in the BIOS. The file /proc/cpuinfo
+ includes information about your CPU features.
+
+
+
+ To find out whether your system supports virtualization, see the section Architecture
+ Support in the article Virtualization
+ Limits and Support.
+
+
+
+ Required software
+
+
+ The &kvm; host requires several packages to be installed. To install all
+ necessary packages, do the following:
+
+
+
+
+
+ Install the patterns-server-kvm_server and patterns-server-kvm_tools.
+
+
+
+
+ Create a Network Bridge. If you do
+ not plan to dedicate an additional physical network card to your
+ virtual guests, network bridging is a standard way to connect the guest
+ machines to the network.
+
+
+
+
+ After all the required packages are installed (and the new network setup
+ activated), try to load the &kvm; kernel module relevant for your CPU
+ type—kvm_intel or
+ kvm_amd:
+
+&prompt.root;modprobe kvm_amd
+
+ Check if the module is loaded into memory:
+
+ &prompt.user;lsmod | grep kvm
+kvm_amd 237568 20
+kvm 1376256 17 kvm_amd
+
+ Now the &kvm; host is ready to serve &kvm; &vmguest;s.
+
+
+
+
+
+ &kvm; host-specific features
+
+
+ You can improve the performance of &kvm;-based &vmguest;s by letting them
+ fully use specific features of the &vmhost;'s hardware (paravirtualization). This section introduces
+ techniques that allow guests to access the physical host's hardware directly, bypassing the emulation layer for optimal performance.
+
+
+
+
+ Examples included in this section assume basic knowledge of the
+ qemu-system-ARCH command-line options.
+
+
+
+
+ Using the host storage with virtio-scsi
+
+ virtio-scsi is an advanced storage stack for
+ &kvm;. It replaces the former virtio-blk stack
+ for SCSI device pass-through. It has several advantages over
+ virtio-blk:
+
+
+
+ Improved scalability
+
+
+ &kvm; guests have a limited number of PCI controllers, which
+ results in a limited number of attached devices.
+ virtio-scsi solves this limitation by
+ grouping multiple storage devices on a single controller. Each
+ device on a virtio-scsi controller is
+ represented as a logical unit, or LUN.
+
+
+
+
+ Standard command set
+
+
+ virtio-blk uses a small set of commands
+ that need to be known to both the
+ virtio-blk driver and the virtual
+ machine monitor, and so introducing a new command requires
+ updating both the driver and the monitor.
+
+
+ By comparison, virtio-scsi does not
+ define commands, but rather a transport protocol for these
+ commands following the industry-standard SCSI specification. This
+ approach is shared with other technologies, such as Fibre
+ Channel, ATAPI and USB devices.
+
+
+
+
+ Device naming
+
+
+ virtio-blk devices are presented inside
+ the guest as
+ /dev/vdX, which
+ is different from device names in physical systems and may cause
+ migration problems.
+
+
+ virtio-scsi keeps the device names
+ identical to those on physical systems, making the virtual
+ machines easily relocatable.
+
+
+
+
+ SCSI device pass-through
+
+
+ For virtual disks backed by a whole LUN on the host, it is
+ preferable for the guest to send SCSI commands directly to the
+ LUN (pass-through). This is limited in
+ virtio-blk, as guests need to use the
+ virtio-blk protocol instead of SCSI command pass-through, and,
+ moreover, it is not available for Windows guests.
+ virtio-scsi natively removes these
+ limitations.
+
+
+
+
+
+ virtio-scsi usage
+
+ &kvm; supports the SCSI pass-through feature with the
+ virtio-scsi-pci device:
+
+&prompt.root;qemu-system-x86_64 [...] \
+-device virtio-scsi-pci,id=scsi
+
+
+
+
+ Accelerated networking with vhost-net
+
+ The vhost-net module is used to accelerate
+ &kvm;'s paravirtualized network drivers. It provides better latency and
+ greater network throughput. Use the vhost-net driver
+ by starting the guest with the following example command line:
+
+&prompt.root;qemu-system-x86_64 [...] \
+-netdev tap,id=guest0,vhost=on,script=no \
+-net nic,model=virtio,netdev=guest0,macaddr=00:16:35:AF:94:4B
+
+ guest0 is an identification string of the
+ vhost-driven device.
+
+
+
+
+ Scaling network performance with multiqueue virtio-net
+
+ As the number of virtual CPUs increases in &vmguest;s, &qemu; offers a
+ way of improving network performance using
+ multiqueue. Multiqueue virtio-net scales
+ network performance by allowing &vmguest; virtual CPUs to transfer
+ packets in parallel. Multiqueue support is required on both the
+ &vmhost; and &vmguest; sides.
+
+
+ Performance benefit
+
+ The multiqueue virtio-net solution is most beneficial in the
+ following cases:
+
+
+
+
+ Network traffic packets are large.
+
+
+
+
+ &vmguest; has many connections active at the same time, mainly
+ between the guest systems, or between the guest and the host, or
+ between the guest and an external system.
+
+
+
+
+ The number of active queues is equal to the number of virtual
+ CPUs in the &vmguest;.
+
+
+
+
+
+
+ While multiqueue virtio-net increases the total network throughput,
+ it increases CPU consumption as it uses the virtual CPU's power.
+
+
+
+ How to enable multiqueue virtio-net
+
+ The following procedure lists important steps to enable the
+ multiqueue feature with qemu-system-ARCH. It
+ assumes that a tap network device with multiqueue capability
+ (supported since kernel version 3.8) is set up on the &vmhost;.
+
+
+
+ In qemu-system-ARCH, enable multiqueue for the
+ tap device:
+
+-netdev tap,vhost=on,queues=2*N
+
+ where N stands for the number of queue pairs.
+
+
+
+
+ In qemu-system-ARCH, enable multiqueue and
+ specify MSI-X (Message Signaled Interrupt) vectors for the
+ virtio-net-pci device:
+
+-device virtio-net-pci,mq=on,vectors=2*N+2
+
+ where the formula for the number of MSI-X vectors results from: N
+ vectors for TX (transmit) queues, N for RX (receive) queues, one
+ for configuration purposes, and one for possible VQ (virtqueue)
+ control.
+
+
+
+
+ In the &vmguest;, enable multiqueue on the relevant network interface
+ (eth0 in this example):
+
+&prompt.sudo;ethtool -L eth0 combined 2*N
+
+
+
+ The resulting qemu-system-ARCH command line looks
+ similar to the following example:
+
+qemu-system-x86_64 [...] -netdev tap,id=guest0,queues=8,vhost=on \
+-device virtio-net-pci,netdev=guest0,mq=on,vectors=10
+
+ The id of the network device
+ (guest0) needs to be identical for both options.
+
+
+ Inside the running &vmguest;, specify the following command with
+ &rootuser; privileges:
+
+&prompt.sudo;ethtool -L eth0 combined 8
+
+ Now the guest system networking uses the multiqueue support from the
+ qemu-system-ARCH hypervisor.
+
+
+
+
+ VFIO: secure direct access to devices
+
+ Directly assigning a PCI device to a &vmguest; (PCI pass-through)
+ avoids performance overhead by bypassing any emulation in
+ performance-critical paths. VFIO replaces the traditional &kvm;
+ &pciback; device assignment. A prerequisite for this feature is a
+ &vmhost; configuration as described in
+ .
+
+
+ To be able to assign a PCI device via VFIO to a &vmguest;, you need to
+ find out which IOMMU Group it belongs to. The
+ (input/output memory
+ management unit that connects a direct memory access-capable I/O bus to
+ the main memory) API supports the notion of groups. A group is a set of
+ devices that can be isolated from all other devices in the system.
+ Groups are therefore the unit of ownership used by
+ .
+
+
+ Assigning a PCI device to a &vmguest; via VFIO
+
+
+ Identify the host PCI device to assign to the guest.
+
+&prompt.sudo;lspci -nn
+[...]
+00:10.0 Ethernet controller [0200]: Intel Corporation 82576 \
+Virtual Function [8086:10ca] (rev 01)
+[...]
+
+ Note down the device ID, 00:10.0 in this example,
+ and the vendor ID (8086:10ca).
+
+
+
+
+ Find the IOMMU group of this device:
+
+&prompt.sudo;readlink /sys/bus/pci/devices/0000\:00\:10.0/iommu_group
+../../../kernel/iommu_groups/20
+
+ The IOMMU group for this device is 20. Now you
+ can check the devices belonging to the same IOMMU group:
+
+&prompt.sudo;ls -l /sys/bus/pci/devices/0000\:01\:10.0/iommu_group/devices/
+[...] 0000:00:1e.0 -> ../../../../devices/pci0000:00/0000:00:1e.0
+[...] 0000:01:10.0 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:01:10.0
+[...] 0000:01:10.1 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:01:10.1
+
+
+
+ Unbind the device from the device driver:
+
+&prompt.sudo;echo "0000:00:10.0" > /sys/bus/pci/devices/0000\:00\:10.0/driver/unbind
+
+
+
+ Bind the device to the vfio-pci driver using the vendor ID from
+ step 1:
+
+&prompt.sudo;echo "8086 10ca" > /sys/bus/pci/drivers/vfio-pci/new_id
+
+ A new device
+ /dev/vfio/IOMMU_GROUP
+ is created as a result, /dev/vfio/20 in this
+ case.
+
+
+
+
+ Change the ownership of the newly created device:
+
+&prompt.sudo;chown qemu:qemu /dev/vfio/DEVICE
+
+
+
+ Now run the &vmguest; with the PCI device assigned.
+
+&prompt.sudo;qemu-system-ARCH [...] -device
+ vfio-pci,host=00:10.0,id=ID
+
+
+
+ No hotplugging
+
+ As of &productname; &productnumber;, hotplugging of PCI devices
+ passed to a &vmguest; via VFIO is not supported.
+
+
+
+ You can find more detailed information on the
+ driver in the
+ /usr/src/linux/Documentation/vfio.txt file
+ (package kernel-source needs to be installed).
+
+
+
+
+ VirtFS: sharing directories between host and guests
+
+ &vmguest;s normally run in a separate computing space—they are
+ provided their own memory range, dedicated CPUs, and file system space.
+ The ability to share parts of the &vmhost;'s file system makes the
+ virtualization environment more flexible by simplifying mutual data
+ exchange. Network file systems, such as CIFS and NFS, have been the
+ traditional way of sharing directories. But as they are not
+ specifically designed for virtualization purposes, they suffer from
+ major performance and feature issues.
+
+
+
+ SELinux Requirement: For security_model=mapped,
+ configure SELinux context:
+
+ &prompt.root; semanage fcontext -a -t virtiofsd_t "/tmp(/.*)?"
+&prompt.root; restorecon -Rv /tmp
+
+
+
+ Host Configuration
+
+ Nothing needs to be done on the host side aside from installing
+ virtiofsd. The &vmguest; xml libvirt
+ file should have a configuration like:
+
+
+<filesystem type="mount" accessmode="mapped">
+ <driver type="virtiofs"/>
+ <source dir="/tmp"/>
+ <target dir="host_tmp"/>
+ <alias name="fs0"/>
+ <address type="pci" domain="0x0000" bus="0x01" slot="0x00" function="0x0"/>
+</filesystem>
+
+ 9p is deprecated
+
+ 9p Protocol is a legacy solution with critical flaws. Moreover,
+ it incurs ~30-50% higher CPU overhead than virtiofs
+ for sequential I/O due to constant context switching between user and kernel space.
+
+
+
+
+ Access Mode Options for virtiofs
+
+ The accessmode attribute in the <filesystem> element
+ defines how guest file permissions map to host permissions. Only two values are valid:
+
+
+ virtiofs Access Mode Options
+
+
+
+
+
+
+ accessmode
+ Description
+ Security Implications
+
+
+
+
+ mapped
+
+
+ The default mode. Maps guest UIDs/GIDs to host UIDs/GIDs using a translation table.
+ Guest files appear as if owned by a dedicated "virtiofs" user on the host (typically UID 1000).
+
+
+ Example: Guest user uid=1000 writes to host /tmp →
+ Host file appears as owned by virtiofsd user (not the guest user).
+
+
+
+
+ Recommended for all environments.
+
+
+ Prevents guest users from directly accessing host user accounts.
+
+
+ Does not require matching host users.
+
+
+
+
+ passthrough
+
+
+ Guest UIDs/GIDs are used directly on the host. The guest must have matching users on the host.
+
+
+ Example: Guest user uid=1000 writes to host /tmp →
+ Host file appears as owned by the user with uid=1000.
+
+
+
+
+ Only for trusted guests (e.g., same-tenant cloud environments).
+
+
+ Requires matching host users (e.g., host must have useradd -u 1000 guestuser).
+
+
+ Security risk: Compromised guest can directly access host user accounts.
+
+
+
+
+ none
+
+
+ Invalid value (common documentation error).
+
+
+ virtiofsd rejects this option with error:
+ security_model=none is not supported.
+
+
+ Always use mapped (default) or passthrough.
+
+
+
+
+ Causes immediate configuration failure.
+
+
+
+
+
+
+
+
+ Key Configuration Rule:
+ accessmode='mapped' must match the host's
+ security_model=mapped in virtiofsd.
+ Mismatched modes cause mount failures with errors like:
+ Failed to set security context: Operation not permitted.
+
+
+
+
+
+ Guest Configuration
+
+ On the guest, load the kernel module and mount the file system:
+
+
+ Guest Mount Command
+ The virtiofs module should be loaded automatically. If it is not, load it manually:
+ &prompt.root; modprobe virtiofs
+ Now you can mount the target directory on your &vmguest;:
+ &prompt.root; mount -t virtiofs -o dax host_tmp /mnt/hosttmp
+
+ Options:
+
+
+
+ virtiofs
+
+
+ dax: Enables direct access for performance (recommended).
+ dax cannot be used with the BTRFS file system.
+
+
+ host_tmp: The target directory in the &vmguest; configuration.
+
+
+
+
+
+ Persistent mounts virtiofs across reboot
+ Simply add this line to /etc/fstab:
+ host_tmp /mnt/hosttmp virtiofs rw,nofail 0 0
+
+
+ Troubleshooting Common Issues
+
+
+
+ Guest Kernel Check: verify the module virtiofs is loaded:
+
+ &prompt.root; lsmod | grep virtiofs
+
+
+ Permission denied: Check SELinux context (see )
+
+
+ Mount fails with 9p: Verify you used -t virtiofs (not 9p)
+
+
+ Guest writes not syncing: Add cache=none to mount options
+
+
+
+
+
+
+ KSM: sharing memory pages between guests
+
+ Kernel Same Page Merging () is a
+ Linux kernel feature that merges identical memory pages from multiple
+ running processes into one memory region. Because &kvm; guests run as
+ processes under Linux, provides
+ the memory overcommit feature to hypervisors for more efficient use of
+ memory. Therefore, if you need to run multiple virtual machines on a
+ host with limited memory, may be
+ helpful to you.
+
+
+ stores its status information in
+ the files under the /sys/kernel/mm/ksm directory:
+
+&prompt.user;ls -1 /sys/kernel/mm/ksm
+full_scans
+merge_across_nodes
+pages_shared
+pages_sharing
+pages_to_scan
+pages_unshared
+pages_volatile
+run
+sleep_millisecs
+
+ For more information on the meaning of the
+ /sys/kernel/mm/ksm/* files, see
+ /usr/src/linux/Documentation/vm/ksm.txt (package
+ kernel-source).
+
+
+ To use , do the following.
+
+
+
+
+ Although &productnameshort; includes
+ support in the kernel, it is
+ disabled by default. To enable it, run the following command:
+
+&prompt.root;echo 1 > /sys/kernel/mm/ksm/run
+
+
+
+ Now run several &vmguest;s under &kvm; and inspect the content of
+ files pages_sharing and
+ pages_shared, for example:
+
+&prompt.user;while [ 1 ]; do cat /sys/kernel/mm/ksm/pages_shared; sleep 1; done
+13522
+13523
+13519
+13518
+13520
+13520
+13528
+
+
+
+
+
diff --git a/references/qemu_monitor.xml b/references/qemu_monitor.xml
new file mode 100644
index 000000000..21f1bea65
--- /dev/null
+++ b/references/qemu_monitor.xml
@@ -0,0 +1,1110 @@
+
+
+ %entities;
+]>
+
+
+ Virtual machine administration using &qemu; monitor
+
+
+
+ yes
+
+
+
+ 2025-12-02
+
+
+
+
+
+
+
+ When a virtual machine is invoked by the &qemusystemarch; command, for
+ example, qemu-system-x86_64, a monitor console is
+ provided for performing interaction with the user. Using the commands
+ available in the monitor console, it is possible to inspect the running
+ operating system, change removable media, take screenshots or audio grabs
+ and control other aspects of the virtual machine.
+
+
+
+ The following sections list selected useful &qemu; monitor commands and
+ their purpose. To get the full list, enter help in the
+ &qemu; monitor command line.
+
+
+
+ Accessing monitor console
+
+
+ No monitor console for &libvirt;
+
+ You can access the monitor console only if you started the virtual
+ machine directly with the &qemusystemarch; command and are viewing its
+ graphical output in a built-in &qemu; window.
+
+
+ If you started the virtual machine with &libvirt;, for example, using
+ virt-manager, and are viewing its output via VNC or
+ Spice sessions, you cannot access the monitor console directly. You
+ can, however, send the monitor command to the virtual machine via
+ &virsh;:
+
+&prompt.root;virsh qemu-monitor-command COMMAND
+
+
+
+ The way you access the monitor console depends on which display device
+ you use to view the output of a virtual machine. Find more details about
+ displays in .
+ For example, to view the monitor while the
+ option is in use, press
+ 2. Similarly, when
+ the option is in use, you can switch to the
+ monitor console by pressing the following key combination:
+ AC.
+
+
+
+ To get help while using the console, use help or
+ ?. To get help for a specific command, use
+ helpCOMMAND.
+
+
+
+ Getting information about the guest system
+
+
+ To get information about the guest system, use info.
+ If used without any option, a list of possible options is printed.
+ Options determine which part of the system is analyzed:
+
+
+
+
+ info version
+
+
+ Shows the version of &qemu;.
+
+
+
+
+ info commands
+
+
+ Lists available QMP commands.
+
+
+
+
+ info network
+
+
+ Shows the network state.
+
+
+
+
+ info chardev
+
+
+ Shows the character devices.
+
+
+
+
+ info block
+
+
+ Information about block devices, such as hard disks, floppy drives,
+ or CD-ROMs.
+
+
+
+
+ info blockstats
+
+
+ Read and write statistics on block devices.
+
+
+
+
+ info registers
+
+
+ Shows the CPU registers.
+
+
+
+
+ info cpus
+
+
+ Shows information about available CPUs.
+
+
+
+
+ info history
+
+
+ Shows the command-line history.
+
+
+
+
+ info irq
+
+
+ Shows the interrupt statistics.
+
+
+
+
+ info pic
+
+
+ Shows the i8259 (PIC) state.
+
+
+
+
+ info pci
+
+
+ Shows the PCI information.
+
+
+
+
+ info tlb
+
+
+ Shows virtual to physical memory mappings.
+
+
+
+
+ info mem
+
+
+ Shows the active virtual memory mappings.
+
+
+
+
+ info jit
+
+
+ Shows dynamic compiler information.
+
+
+
+
+ info kvm
+
+
+ Shows the &kvm; information.
+
+
+
+
+ info numa
+
+
+ Shows the NUMA information.
+
+
+
+
+ info usb
+
+
+ Shows the guest USB devices.
+
+
+
+
+ info usbhost
+
+
+ Shows the host USB devices.
+
+
+
+
+ info profile
+
+
+ Shows the profiling information.
+
+
+
+
+ info capture
+
+
+ Shows the capture (audio grab) information.
+
+
+
+
+ info snapshots
+
+
+ Shows the currently saved virtual machine snapshots.
+
+
+
+
+ info status
+
+
+ Shows the current virtual machine status.
+
+
+
+
+ info mice
+
+
+ Shows which guest mice are receiving events.
+
+
+
+
+ info vnc
+
+
+ Shows the VNC server status.
+
+
+
+
+ info name
+
+
+ Shows the current virtual machine name.
+
+
+
+
+ info uuid
+
+
+ Shows the current virtual machine's UUID.
+
+
+
+
+ info usernet
+
+
+ Shows the user network stack connection states.
+
+
+
+
+ info migrate
+
+
+ Shows the migration status.
+
+
+
+
+ info balloon
+
+
+ Shows the balloon device information.
+
+
+
+
+ info qtree
+
+
+ Shows the device tree.
+
+
+
+
+ info qdm
+
+
+ Shows the qdev device model list.
+
+
+
+
+ info roms
+
+
+ Shows the ROMs.
+
+
+
+
+ info migrate_cache_size
+
+
+ Shows the current migration xbzrle (Xor Based Zero Run
+ Length Encoding) cache size.
+
+
+
+
+ info migrate_capabilities
+
+
+ Shows the status of the multiple migration capabilities, such as
+ xbzrle compression.
+
+
+
+
+ info mtree
+
+
+ Shows the &vmguest; memory hierarchy.
+
+
+
+
+ info trace-events
+
+
+ Shows available trace-events and their status.
+
+
+
+
+
+
+ Changing VNC password
+
+
+ To change the VNC password, use the change vnc
+ password command and enter the new password:
+
+
+
+(qemu) change vnc password
+Password: ********
+(qemu)
+
+
+
+ Managing devices
+
+
+ To add a new disk while the guest is running (hotplug), use the
+ drive_add and device_add commands.
+ First define a new drive to be added as a device to bus 0:
+
+
+(qemu) drive_add 0 if=none,file=/tmp/test.img,format=raw,id=disk1
+OK
+
+
+ You can confirm your new device by querying the block subsystem:
+
+
+(qemu) info block
+[...]
+disk1: removable=1 locked=0 tray-open=0 file=/tmp/test.img ro=0 drv=raw \
+encrypted=0 bps=0 bps_rd=0 bps_wr=0 iops=0 iops_rd=0 iops_wr=0
+
+
+ After the new drive is defined, it needs to be connected to a device so
+ that the guest can see it. The typical device would be a
+ virtio-blk-pci or scsi-disk. To get
+ the full list of available values, run:
+
+
+(qemu) device_add ?
+name "VGA", bus PCI
+name "usb-storage", bus usb-bus
+[...]
+name "virtio-blk-pci", bus virtio-bus
+
+
+ Now add the device
+
+
+(qemu) device_add virtio-blk-pci,drive=disk1,id=myvirtio1
+
+
+ and confirm with
+
+
+(qemu) info pci
+[...]
+Bus 0, device 4, function 0:
+ SCSI controller: PCI device 1af4:1001
+ IRQ 0.
+ BAR0: I/O at 0xffffffffffffffff [0x003e].
+ BAR1: 32 bit memory at 0xffffffffffffffff [0x00000ffe].
+ id "myvirtio1"
+
+
+
+ Devices added with the device_add command can be
+ removed from the guest with device_del. Enter
+ help device_del on the &qemu; monitor command line
+ for more information.
+
+
+
+
+ To release the device or file connected to the removable media device,
+ use the ejectDEVICE
+ command. Use the optional to force ejection.
+
+
+
+ To change removable media (like CD-ROMs), use the
+ changeDEVICE command. The
+ name of the removable media can be determined using the info
+ block command:
+
+
+
+(qemu) info block
+ide1-cd0: type=cdrom removable=1 locked=0 file=/dev/sr0 ro=1 drv=host_device
+(qemu) change ide1-cd0 /path/to/image
+
+
+
+ Controlling keyboard and mouse
+
+
+ It is possible to use the monitor console to emulate keyboard and mouse
+ input if necessary. For example, if your graphical user interface
+ intercepts certain key combinations at a low level (such as
+ F1
+ in X Window System), you can still enter them using the
+ sendkeyKEYS:
+
+
+sendkey ctrl-alt-f1
+
+
+ To list the key names used in the KEYS option,
+ enter sendkey and press .
+
+
+
+ To control the mouse, the following commands can be used:
+
+
+
+
+ mouse_moveDXdy [DZ]
+
+
+ Move the active mouse pointer to the specified coordinates dx, dy
+ with the optional scroll axis dz.
+
+
+
+
+ mouse_buttonVAL
+
+
+ Change the state of the mouse buttons (1=left, 2=middle, 4=right).
+
+
+
+
+ mouse_setINDEX
+
+
+ Set which mouse device receives events. Device index numbers can be
+ obtained with the info mice command.
+
+
+
+
+
+
+ Changing available memory
+
+
+ If the virtual machine was started with the option (the paravirtualized balloon device is therefore
+ enabled), you can change the available memory dynamically. For more
+ information about enabling the balloon device, see
+ .
+
+
+
+ To get information about the balloon device in the monitor console and to
+ determine whether the device is enabled, use the info
+ balloon command:
+
+
+(qemu) info balloon
+
+
+ If the balloon device is enabled, use the balloon
+ MEMORY_IN_MB command to set the requested
+ amount of memory:
+
+
+(qemu) balloon 400
+
+
+ Dumping virtual machine memory
+
+
+ To save the content of the virtual machine memory to a disk or console
+ output, use the following commands:
+
+
+
+
+ memsaveADDRSIZEFILENAME
+
+
+ Saves a virtual memory dump starting at
+ ADDR of size
+ SIZE to file
+ FILENAME
+
+
+
+
+ pmemsaveADDRSIZEFILENAME
+
+
+ Saves a physical memory dump starting at
+ ADDR of size
+ SIZE to file
+ FILENAME.
+
+
+
+
+ x /FMTADDR
+
+
+ Makes a virtual memory dump starting at address
+ ADDR and formatted according to the
+ FMT string. The
+ FMT string consists of three parameters
+ COUNTFORMATSIZE:
+
+
+ The COUNT parameter is the number of
+ items to be dumped.
+
+
+ The FORMAT can be x
+ (hex), d (signed decimal), u
+ (unsigned decimal), o (octal),
+ c (char) or i (assembly
+ instruction).
+
+
+ The SIZE parameter can be
+ b (8 bits), h (16 bits),
+ w (32 bits) or g (64 bits).
+ On x86, h or w can be
+ specified with the i format to respectively
+ select 16 or 32-bit code instruction size.
+
+
+
+
+ xp /FMTADDR
+
+
+ Makes a physical memory dump starting at address
+ ADDR and formatted according to the
+ FMT string. The
+ FMT string consists of three parameters
+ COUNTFORMATSIZE:
+
+
+ The COUNT parameter is the number of
+ items to be dumped.
+
+
+ The FORMAT can be x
+ (hex), d (signed decimal), u
+ (unsigned decimal), o (octal),
+ c (char) or i (asm
+ instruction).
+
+
+ The SIZE parameter can be
+ b (8 bits), h (16 bits),
+ w (32 bits) or g (64 bits).
+ On x86, h or w can be
+ specified with the i format to respectively
+ select 16 or 32-bit code instruction size.
+
+
+
+
+
+
+ Managing virtual machine snapshots
+
+
+ Managing snapshots in &qemu; monitor is not supported by &suse;
+ yet. The information found in this section may be helpful in specific
+ cases.
+
+
+
+ snapshots are snapshots of the complete
+ virtual machine, including the state of the CPU, RAM, and the content of all
+ writable disks. To use virtual machine snapshots, you must have at least
+ one non-removable and writable block device using the
+ qcow2 disk image format.
+
+
+
+ Snapshots are helpful when you need to save your virtual machine in a
+ particular state. For example, after you have configured network services
+ on a virtualized server and want to quickly start the virtual machine in
+ the same state that was saved last. You can also create a snapshot after
+ the virtual machine has been powered off to create a backup state before
+ you try something experimental and make the &vmguest; unstable. This section
+ introduces the former case, while the latter is described in
+ .
+
+
+
+ The following commands are available for managing snapshots in &qemu;
+ monitor:
+
+
+
+
+ savevmNAME
+
+
+ Creates a new virtual machine snapshot under the tag
+ NAME or replaces an existing snapshot.
+
+
+
+
+ loadvmNAME
+
+
+ Loads a virtual machine snapshot tagged
+ NAME.
+
+
+
+
+ delvm
+
+
+ Deletes a virtual machine snapshot.
+
+
+
+
+ info snapshots
+
+
+ Prints information about available snapshots.
+
+(qemu) info snapshots
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+
+
+
+ Unique auto-incremented identification number of the snapshot.
+
+
+
+
+ Unique description string of the snapshot. It is meant as a
+ human-readable version of the ID.
+
+
+
+
+ The disk space occupied by the snapshot. The more memory is
+ consumed by running applications, the bigger the snapshot is.
+
+
+
+
+ Time and date the snapshot was created.
+
+
+
+
+ The current state of the virtual machine's clock.
+
+
+
+
+
+
+
+
+ Suspending and resuming virtual machine execution
+
+
+ The following commands are available for suspending and resuming virtual
+ machines:
+
+
+
+
+ stop
+
+
+ Suspends the execution of the virtual machine.
+
+
+
+
+ cont
+
+
+ Resumes the execution of the virtual machine.
+
+
+
+
+ system_reset
+
+
+ Resets the virtual machine. The effect is similar to the reset
+ button on a physical machine. This may leave the file system in an
+ unclean state.
+
+
+
+
+ system_powerdown
+
+
+ Sends an ACPI shutdown request to the
+ machine. The effect is similar to the power button on a physical
+ machine.
+
+
+
+
+ q or quit
+
+
+ Terminates &qemu; immediately.
+
+
+
+
+
+
+ Live migration
+
+
+ The live migration process allows you to transmit any virtual machine from
+ one host system to another host system without any interruption in
+ availability. It is possible to change hosts permanently or only during
+ maintenance.
+
+
+
+ The requirements for live migration:
+
+
+
+
+
+ libvirt requirements apply.
+
+
+
+
+
+ Live migration is only possible between &vmhost;s with the same CPU
+ features.
+
+
+
+
+ interface,
+ feature, and the
+ command line option are not compatible
+ with migration.
+
+
+
+
+ The guest on the source and destination hosts must be started in the
+ same way.
+
+
+
+
+ qemu command line option should not be
+ used for migration (and this qemu command line
+ option is not supported).
+
+
+
+
+
+ Support status
+
+ The postcopy mode is not yet supported in
+ &productname;. It is released as a technology preview only. For more
+ information about postcopy, see
+ .
+
+
+
+
+ More recommendations can be found on the following Web site:
+
+
+
+
+ The live migration process has the following steps:
+
+
+
+
+
+ The virtual machine instance is running on the source host.
+
+
+
+
+ The virtual machine is started on the destination host in frozen
+ listening mode. The parameters used are the same as on the source
+ host plus the
+ parameter, where IP specifies the IP
+ address and PORT specifies the port for
+ listening to the incoming migration. If 0 is set as the IP address, the
+ virtual machine listens on all interfaces.
+
+
+
+
+ On the source host, switch to the monitor console and use the
+ migrate -d tcp:
+ DESTINATION_IP:PORT
+ command to initiate the migration.
+
+
+
+
+ To determine the state of the migration, use the info
+ migrate command in the monitor console on the source host.
+
+
+
+
+ To cancel the migration, use the migrate_cancel
+ command in the monitor console on the source host.
+
+
+
+
+ To set the maximum tolerable downtime for migration in seconds, use
+ the migrate_set_downtime
+ NUMBER_OF_SECONDS command.
+
+
+
+
+ To set the maximum speed for migration in bytes per second, use the
+ migrate_set_speed
+ BYTES_PER_SECOND command.
+
+
+
+
+
+ QMP - &qemu; machine protocol
+
+
+ QMP is a JSON-based protocol that allows applications—such as
+ &libvirt;—to communicate with a running &qemu; instance. There are
+ several ways you can access the &qemu; monitor using QMP commands.
+
+
+
+ Access QMP via standard input/output
+
+ The most flexible way to use QMP is by specifying the
+ option. The following example creates a QMP
+ instance using standard input/output. In the following examples,
+ -> marks lines with commands sent from the client to
+ the running &qemu; instance, while <- marks lines
+ with the output returned from &qemu;.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev stdio,id=mon0 \
+-mon chardev=mon0,mode=control,pretty=on
+
+<- {
+ "QMP": {
+ "version": {
+ "qemu": {
+ "micro": 0,
+ "minor": 0,
+ "major": 2
+ },
+ "package": ""
+ },
+ "capabilities": [
+ ]
+ }
+}
+
+ When a new QMP connection is established, QMP sends its greeting
+ message and enters capabilities negotiation mode. In this mode, only
+ the qmp_capabilities command works. To exit
+ capabilities negotiation mode and enter command mode, the
+ qmp_capabilities command must be issued first:
+
+-> { "execute": "qmp_capabilities" }
+<- {
+ "return": {
+ }
+}
+
+ "return": {} is a QMP's success response.
+
+
+ QMP's commands can have arguments. For example, to eject a CD-ROM drive,
+ enter the following:
+
+->{ "execute": "eject", "arguments": { "device": "ide1-cd0" } }
+<- {
+ "timestamp": {
+ "seconds": 1410353381,
+ "microseconds": 763480
+ },
+ "event": "DEVICE_TRAY_MOVED",
+ "data": {
+ "device": "ide1-cd0",
+ "tray-open": true
+ }
+}
+{
+ "return": {
+ }
+}
+
+
+
+ Access QMP via telnet
+
+ Instead of the standard input/output, you can connect the QMP interface
+ to a network socket and communicate with it via a specified port:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev socket,id=mon0,host=localhost,port=4444,server,nowait \
+-mon chardev=mon0,mode=control,pretty=on
+
+ And then run telnet to connect to port 4444:
+
+&prompt.user;telnet localhost 4444
+Trying ::1...
+Connected to localhost.
+Escape character is '^]'.
+<- {
+ "QMP": {
+ "version": {
+ "qemu": {
+ "micro": 0,
+ "minor": 0,
+ "major": 2
+ },
+ "package": ""
+ },
+ "capabilities": [
+ ]
+ }
+}
+
+ You can create several monitor interfaces at the same time. The
+ following example creates one HMP instance—human monitor which
+ understands normal &qemu; monitor's commands—on the
+ standard input/output, and one QMP instance on localhost port 4444:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev stdio,id=mon0 -mon chardev=mon0,mode=readline \
+-chardev socket,id=mon1,host=localhost,port=4444,server,nowait \
+ -mon chardev=mon1,mode=control,pretty=on
+
+
+
+ Access QMP via Unix socket
+
+ Invoke &qemu; using the option and create a Unix
+ socket:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-qmp unix:/tmp/qmp-sock,server --monitor stdio
+
+&qemu; waiting for connection on: unix:/tmp/qmp-sock,server
+
+ To communicate with the &qemu; instance via the
+ /tmp/qmp-sock socket, use nc (see
+ man 1 nc for more information) from another terminal
+ on the same host:
+
+&prompt.sudo;nc -U /tmp/qmp-sock
+<- {"QMP": {"version": {"qemu": {"micro": 0, "minor": 0, "major": 2} [...]
+
+
+
+ Access QMP via &libvirt;'s virsh command
+
+ If you run your virtual machines under &libvirt;, you can communicate with its
+ running guests by running the virsh
+ qemu-monitor-command:
+
+&prompt.sudo;virsh qemu-monitor-command vm_guest1 \
+--pretty '{"execute":"query-kvm"}'
+<- {
+ "return": {
+ "enabled": true,
+ "present": true
+ },
+ "id": "libvirt-8"
+}
+
+ In the above example, we ran the simple command
+ query-kvm, which checks if the host is capable of
+ running &kvm; and if &kvm; is enabled.
+
+
+ Generating human-readable output
+
+ To use the standard human-readable output format of &qemu; instead of
+ the JSON format, use the option:
+
+&prompt.sudo;virsh qemu-monitor-command vm_guest1 --hmp "query-kvm"
+
+
+
+
diff --git a/references/qemu_running_vms_qemukvm.xml b/references/qemu_running_vms_qemukvm.xml
new file mode 100644
index 000000000..d304125f0
--- /dev/null
+++ b/references/qemu_running_vms_qemukvm.xml
@@ -0,0 +1,1978 @@
+
+
+ %entities;
+]>
+
+
+ Running virtual machines with qemu-system
+
+
+
+ yes
+
+
+
+ 2025-12-02
+
+
+
+
+
+
+
+ Once you have a virtual disk image ready (for more information on disk
+ images, see ), you can
+ start the related virtual machine.
+ introduced simple commands
+ to install and run a &vmguest;. This article focuses on a more detailed
+ explanation of qemu-system-ARCH usage and shows
+ solutions for more specific tasks. For a complete list of
+ qemu-system-ARCH's options, see its man page
+ (man 1 qemu).
+
+
+ Basic qemu-system-ARCH invocation
+
+
+ The qemu-system-ARCH command uses the following
+ syntax:
+
+
+qemu-system-ARCH OPTIONS -drive file=DISK_IMAGE
+
+
+
+
+ qemu-system-ARCH understands many options. Most of
+ them define parameters of the emulated hardware, while others affect
+ more general emulator behavior. If you do not supply any options,
+ default values are used, and you need to supply the path to a disk
+ image to be run.
+
+
+
+
+ Path to the disk image holding the guest system you want to
+ virtualize. qemu-system-ARCH supports many image
+ formats. Use qemu-img to
+ list them.
+
+
+
+
+
+ &aarch64; architecture
+
+ &kvm; support is available only for 64-bit Arm® architecture
+ (&aarch64;). Running &qemu; on the &aarch64; architecture requires you
+ to specify:
+
+
+
+
+ A machine type designed for &qemu; Arm® virtual machines using the
+ option.
+
+
+
+
+ A firmware image file using the option.
+
+
+ You can specify the firmware image files alternatively using the
+ options, for example:
+
+
+-drive file=/usr/share/edk2/aarch64/&qemu;_EFI-pflash.raw,if=pflash,format=raw
+-drive file=/var/lib/libvirt/qemu/nvram/opensuse_VARS.fd,if=pflash,format=raw
+
+
+
+
+ A CPU of the &vmhost; using the option
+ (default is ).
+
+
+
+
+ The same Generic Interrupt Controller (GIC) version as the host
+ using the option
+ (default is ).
+
+
+
+
+ If a graphic mode is needed, a graphic device of type
+ virtio-gpu-pci.
+
+
+
+
+ For example:
+
+
+&prompt.sudo;qemu-system-aarch64 [...] \
+ -bios /usr/share/qemu/qemu-uefi-aarch64.bin \
+ -cpu host \
+ -device virtio-gpu-pci \
+ -machine virt,accel=kvm,gic-version=host
+
+
+
+
+ General qemu-system-ARCH options
+
+
+ This section introduces general qemu-system-ARCH
+ options and options related to the basic emulated hardware, such as the
+ virtual machine's processor, memory, model type, or time processing
+ methods.
+
+
+
+
+ -name NAME_OF_GUEST
+
+
+ Specifies the name of the running guest system. The name is
+ displayed in the window caption and used for the VNC server.
+
+
+
+
+ -boot OPTIONS
+
+
+ Specifies the order in which the defined drives are booted. Drives
+ are represented by letters, where a and
+ b stand for the floppy drives 1 and 2,
+ c stands for the first hard disk,
+ d stands for the first CD-ROM drive, and
+ n to p stand for Ether-boot
+ network adapters.
+
+
+ For example, qemu-system-ARCH [...] -boot
+ order=ndc first tries to boot from the network, then from
+ the first CD-ROM drive, and finally from the first hard disk.
+
+
+
+
+ -pidfile FILENAME
+
+
+ Stores &qemu;'s process identification number (PID) in a file.
+ This is useful if you run &qemu; from a script.
+
+
+
+
+ -nodefaults
+
+
+ By default, &qemu; creates basic virtual devices even if you do not
+ specify them on the command line. This option turns this feature
+ off, and you must specify every single device manually, including
+ graphical and network cards, parallel or serial ports, or virtual
+ consoles. Even &qemu; monitor is not attached by default.
+
+
+
+
+ -daemonize
+
+
+ Daemonizes the &qemu; process after it is started.
+ &qemu; detaches from the standard input and standard output after
+ it is ready to receive connections on any of its devices.
+
+
+
+
+
+
+ SeaBIOS BIOS implementation
+
+ SeaBIOS is the default BIOS used. You can boot USB devices, any drive
+ (CD-ROM, Floppy or a hard disk). It has USB mouse and keyboard support
+ and supports multiple VGA cards. For more information about SeaBIOS,
+ refer to the SeaBIOS
+ Web site.
+
+
+
+
+ Basic virtual hardware
+
+
+ Machine type
+
+ You can specify the type of the emulated machine. Run
+ qemu-system-ARCH -M help to view a list of
+ supported machine types.
+
+
+ ISA-PC
+
+ The machine type isapc: ISA-only-PC is
+ unsupported.
+
+
+
+
+ CPU model
+
+ To specify the type of the processor (CPU) model, run
+ qemu-system-ARCH -cpu
+ MODEL. Use qemu-system-ARCH -cpu
+ help to view a list of supported CPU models.
+
+
+
+ Other basic options
+
+ The following is a list of the most commonly used options while launching
+ qemu from the command line. To see all options
+ available, refer to the qemu-doc man page.
+
+
+
+ -m MEGABYTES
+
+
+ Specifies how many megabytes are used for the virtual RAM size.
+
+
+
+
+ -balloon virtio
+
+
+ Specifies a paravirtualized device to dynamically change the
+ amount of virtual RAM assigned to the &vmguest;. The upper limit is
+ the amount of memory specified with -m.
+
+
+
+
+ -smp NUMBER_OF_CPUS
+
+
+ Specifies how many CPUs to emulate. &qemu; supports up to 255
+ CPUs on the PC platform (up to 64 with &kvm; acceleration used).
+ This option also takes other CPU-related parameters, such as
+ the number of sockets, the number of
+ cores per socket, or the number of
+ threads per core.
+
+
+
+
+
+ The following is an example of a working
+ qemu-system-ARCH command line:
+
+
+&prompt.sudo;qemu-system-x86_64 \
+ -name "&productnameshort; &productnumber;" \
+ -M pc-i440fx-2.7 -m 512 \
+ -machine accel=kvm -cpu kvm64 -smp 2 \
+ -drive format=raw,file=/images/sles.raw
+
+
+ &qemu; window with &productnameshort; as &vmguest;
+
+
+
+
+
+
+
+
+
+
+
+ -no-acpi
+
+
+ Disables support.
+
+
+
+
+ -S
+
+
+ &qemu; starts with the CPU stopped. To start the CPU, enter
+ c in &qemu; monitor. For more information,
+ see .
+
+
+
+
+
+
+
+
+ Storing and reading the configuration of virtual devices
+
+
+ -readconfig CFG_FILE
+
+
+ Instead of entering the device configuration options on the
+ command line each time you want to run &vmguest;,
+ qemu-system-ARCH can read them from a file that
+ was either previously saved with -writeconfig
+ or edited manually.
+
+
+
+
+ -writeconfig CFG_FILE
+
+
+ Dumps the current virtual machine's device configuration to a
+ text file. It can consequently be reused with the
+ -readconfig option.
+
+
+&prompt.sudo;qemu-system-x86_64 -name "&productnameshort; &productnumber;" \
+ -machine accel=kvm -M pc-i440fx-2.7 -m 512 -cpu kvm64 \
+ -smp 2 /images/sles.raw -writeconfig /images/sles.cfg
+(exited)
+&prompt.user;cat /images/sles.cfg
+# qemu config file
+
+[drive]
+ index = "0"
+ media = "disk"
+ file = "/images/sles_base.raw"
+
+
+ This way, you can effectively manage the configuration of your
+ virtual machines' devices in a well-arranged manner.
+
+
+
+
+
+
+
+ Guest real-time clock
+
+
+ -rtc OPTIONS
+
+
+ Specifies the way the RTC is handled inside a &vmguest;. By
+ default, the clock of the guest is derived from that of the host
+ system. Therefore, we recommend that the host system clock
+ be synchronized with an accurate external clock, for example, via
+ an NTP service.
+
+
+ If you need to isolate the &vmguest; clock from the host one,
+ specify clock=vm instead of the default
+ clock=host.
+
+
+ You can also specify the initial time of the &vmguest;'s clock
+ with the base option:
+
+&prompt.sudo;qemu-system-x86_64 [...] -rtc clock=vm,base=2010-12-03T01:02:00
+
+ Instead of a time stamp, you can specify utc
+ or localtime. The former instructs &vmguest;
+ to start at the current UTC value (Coordinated Universal Time,
+ see ), while
+ the latter applies the local time setting.
+
+
+
+
+
+
+
+ Using devices in &qemu;
+
+
+ &qemu; virtual machines emulate all devices needed to run a &vmguest;.
+ &qemu; supports, for example, several types of network cards, block
+ devices (hard and removable drives), USB devices, character devices
+ (serial and parallel ports), or multimedia devices (graphic and sound
+ cards). This section introduces options for configuring multiple types of
+ supported devices.
+
+
+
+
+ If your device, such as -drive, needs a special
+ driver and driver properties to be set, specify them with the
+ -device option, and identify with
+ drive= suboption. For example:
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive if=none,id=drive0,format=raw \
+-device virtio-blk-pci,drive=drive0,scsi=off ...
+
+ To get help on available drivers and their properties, use
+ and .
+
+
+
+
+ Block devices
+
+ Block devices are vital for virtual machines. These are fixed or
+ removable storage media called drives. One of the
+ connected hard disks typically holds the guest operating system to be
+ virtualized.
+
+
+ drives are defined with
+ -drive. This option has many suboptions, some of
+ which are described in this section. For the complete list, see the
+ man page (man 1 qemu).
+
+
+ Suboptions for the -drive option
+
+ file=image_fname
+
+
+ Specifies the path to the disk image that must be used with this
+ drive. If not specified, an empty (removable) drive is assumed.
+
+
+
+
+ if=drive_interface
+
+
+ Specifies the type of interface to which the drive is connected.
+ Currently, only floppy,
+ scsi, ide, or
+ virtio are supported by &suse;.
+ virtio defines a paravirtualized disk driver.
+ Default is ide.
+
+
+
+
+ index=index_of_connector
+
+
+ Specifies the index number of a connector on the disk interface
+ (see the if option) where the drive is
+ connected. If not specified, the index is automatically
+ incremented.
+
+
+
+
+ media=type
+
+
+ Specifies the type of media. Can be disk for
+ hard disks, or cdrom for removable CD-ROM
+ drives.
+
+
+
+
+ format=img_fmt
+
+
+ Specifies the format of the connected disk image. If not
+ specified, the format is autodetected. Currently, &suse; supports
+ raw and qcow2 formats.
+
+
+
+
+ cache=method
+
+
+ Specifies the caching method for the drive. Possible values are
+ unsafe, writethrough,
+ writeback, directsync, or
+ none. To improve performance when using the
+ qcow2 image format, select
+ writeback. none disables
+ the host page cache and, therefore, is the safest option. The default
+ for image files is writeback.
+
+
+
+
+
+
+ To simplify defining block devices, &qemu; understands several
+ shortcuts which you may find handy when entering the
+ qemu-system-ARCH command line.
+
+
+ You can use
+
+&prompt.sudo;qemu-system-x86_64 -cdrom /images/cdrom.iso
+
+ instead of
+
+&prompt.sudo;qemu-system-x86_64 -drive format=raw,file=/images/cdrom.iso,index=2,media=cdrom
+
+ and
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/image1.raw -hdb /images/image2.raw -hdc \
+/images/image3.raw -hdd /images/image4.raw
+
+ instead of
+
+&prompt.sudo;qemu-system-x86_64 -drive format=raw,file=/images/image1.raw,index=0,media=disk \
+-drive format=raw,file=/images/image2.raw,index=1,media=disk \
+-drive format=raw,file=/images/image3.raw,index=2,media=disk \
+-drive format=raw,file=/images/image4.raw,index=3,media=disk
+
+
+ Using host drives instead of images
+
+ As an alternative to using disk images (see
+ ) you can also use
+ existing &vmhost; disks, connect them as drives, and access them from
+ &vmguest;. Use the host disk device directly instead of disk image
+ file names.
+
+
+ To access the host CD-ROM drive, use
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive file=/dev/cdrom,media=cdrom
+
+ To access the host hard disk, use
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive file=/dev/hdb,media=disk
+
+ A host drive used by a &vmguest; must not be accessed concurrently by
+ the &vmhost; or another &vmguest;.
+
+
+
+ Freeing unused guest disk space
+
+ A sparse image is a type of disk image
+ file that grows in size as the user adds data to it, taking up only
+ as much disk space as is stored in it. For example, if you copy 1 GB
+ of data inside the sparse disk image, its size grows by 1 GB. If you
+ then delete, for example, 500 MB of the data, the image size does not
+ by default decrease as expected.
+
+
+ This is why the option is introduced on
+ the &kvm; command line. It tells the hypervisor to automatically free
+ the holes after deleting data from the sparse guest
+ image. This option is valid only for the
+ if=scsi drive interface:
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive format=img_format,file=/path/to/file.img,if=scsi,discard=on
+
+ Support status
+
+ is not supported. This interface does not
+ map to virtio-scsi, but rather to the
+ lsi SCSI adapter.
+
+
+
+
+ IOThreads
+
+ IOThreads are dedicated event loop threads for virtio devices to
+ perform I/O requests to improve scalability, especially on an SMP
+ &vmhost; with SMP &vmguest;s using many disk devices. Instead of
+ using &qemu;'s main event loop for I/O processing, IOThreads allow
+ spreading I/O work across multiple CPUs and can improve latency when
+ properly configured.
+
+
+ IOThreads are enabled by defining IOThread objects. virtio devices
+ can then use the objects for their I/O event loops. Many virtio
+ devices can use a single IOThread object, or virtio devices and
+ IOThread objects can be configured in a 1:1 mapping. The following
+ example creates a single IOThread with ID
+ iothread0, which is then used as the event loop for
+ two virtio-blk devices.
+
+&prompt.sudo;qemu-system-x86_64 [...] -object iothread,id=iothread0\
+-drive if=none,id=drive0,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive0,scsi=off,\
+iothread=iothread0 -drive if=none,id=drive1,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive1,scsi=off,\
+iothread=iothread0 [...]
+
+ The following qemu command line example illustrates a 1:1 virtio
+ device to IOThread mapping:
+
+&prompt.sudo;qemu-system-x86_64 [...] -object iothread,id=iothread0\
+-object iothread,id=iothread1 -drive if=none,id=drive0,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive0,scsi=off,\
+iothread=iothread0 -drive if=none,id=drive1,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive1,scsi=off,\
+ iothread=iothread1 [...]
+
+
+ Bio-based I/O path for virtio-blk
+
+ For better performance of I/O-intensive applications, a new I/O path
+ was introduced for the virtio-blk interface in kernel version 3.7.
+ This bio-based block device driver skips the I/O scheduler, and thus
+ shortens the I/O path in the guest and has lower latency. It is
+ especially useful for high-speed storage devices, such as SSD disks.
+
+
+ The driver is disabled by default. To use it, do the following:
+
+
+
+
+ Append to the kernel
+ command line on the guest. You can do so via
+ &yast;SystemBoot
+ Loader.
+
+
+ You can also do it by editing
+ /etc/default/grub, searching for the line
+ that contains , and
+ adding the kernel parameter at the end. Then run
+ grub2-mkconfig >/boot/grub2/grub.cfg to
+ update the grub2 boot menu.
+
+
+
+
+ Reboot the guest with the new kernel command line active.
+
+
+
+
+ Bio-based driver on slow devices
+
+ The bio-based virtio-blk driver does not help on slow devices such
+ as spinning hard disks. The reason is that the benefit of scheduling is
+ larger than what the shortened bio path offers. Do not use the
+ bio-based driver on slow devices.
+
+
+
+
+ Accessing iSCSI resources directly
+
+
+ &qemu; now integrates with libiscsi. This allows
+ &qemu; to access iSCSI resources directly and use them as virtual
+ machine block devices. This feature does not require any host iSCSI
+ initiator configuration, as is needed for a libvirt iSCSI target-based storage pool setup. Instead, it directly connects guest storage
+ interfaces to an iSCSI target LUN via the user space library
+ libiscsi. iSCSI-based disk devices can also be specified in the
+ libvirt XML configuration.
+
+
+ RAW image format
+
+ This feature is only available using the RAW image format, as the
+ iSCSI protocol has certain technical limitations.
+
+
+
+
+ The following is the &qemu; command-line interface for iSCSI
+ connectivity.
+
+
+ virt-manager limitation
+
+ The use of libiscsi-based storage provisioning is not yet exposed
+ by the virt-manager interface, but instead it would be configured
+ by directly editing the guest XML. This new way of accessing
+ iSCSI-based storage is to be done at the command line.
+
+
+&prompt.sudo;qemu-system-x86_64 -machine accel=kvm \
+ -drive file=iscsi://192.168.100.1:3260/iqn.2016-08.com.example:314605ab-a88e-49af-b4eb-664808a3443b/0,\
+ format=raw,if=none,id=mydrive,cache=none \
+ -device ide-hd,bus=ide.0,unit=0,drive=mydrive ...
+
+ Here is an example snippet of guest domain XML which uses the
+ protocol-based iSCSI:
+
+<devices>
+...
+ <disk type='network' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source protocol='iscsi' name='iqn.2013-07.com.example:iscsi-nopool/2'>
+ <host name='example.com' port='3260'/>
+ </source>
+ <auth username='myuser'>
+ <secret type='iscsi' usage='libvirtiscsi'/>
+ </auth>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+</devices>
+
+ Contrast that with an example which uses the host-based iSCSI
+ initiator which virt-manager sets up:
+
+<devices>
+...
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw' cache='none' io='native'/>
+ <source dev='/dev/disk/by-path/scsi-0:0:0:0'/>
+ <target dev='hda' bus='ide'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
+ function='0x1'/>
+ </controller>
+</devices>
+
+
+ Using RADOS block devices with &qemu;
+
+ RADOS Block Devices (RBD) store data in a Ceph cluster. They allow
+ snapshotting, replication and data consistency. You can use an RBD
+ from your &kvm;-managed &vmguest;s similarly to how you use other
+ block devices.
+
+
+ For more details, refer to the
+ &ses;
+ &admin;, chapter Ceph as a Back-end
+ for &qemu; &kvm; Instance.
+
+
+
+
+
+ Graphic devices and display options
+
+ This section describes &qemu; options affecting the type of the
+ emulated video card and the way the &vmguest; graphical output is
+ displayed.
+
+
+ Defining video cards
+
+ &qemu; uses -vga to define a video card used to
+ display the &vmguest;'s graphical output. The -vga
+ option understands the following values:
+
+
+
+ none
+
+
+ Disables video cards on &vmguest; (no video card is emulated).
+ You can still access the running &vmguest; via the serial
+ console.
+
+
+
+
+ std
+
+
+ Emulates a standard VESA 2.0 VBE video card. Use it if you
+ intend to use a high display resolution on the &vmguest;.
+
+
+
+
+ qxl
+
+
+ QXL is a paravirtual graphic card. It is VGA compatible
+ (including VESA 2.0 VBE support). qxl is
+ recommended when using the spice video
+ protocol.
+
+
+
+
+ virtio
+
+
+ Paravirtual VGA graphic card.
+
+
+
+
+
+
+ Display options
+
+ The following options affect the way the &vmguest;'s graphical output is
+ displayed.
+
+
+
+ -display gtk
+
+
+ Display video output in a GTK window. This interface provides
+ UI elements to configure and control the VM during runtime.
+
+
+
+
+ -display sdl
+
+
+ Display video output via SDL in a separate graphics window. For
+ more information, see the SDL documentation.
+
+
+
+
+ -spice option[,option[,...]]
+
+
+ Enables the Spice remote desktop protocol.
+
+
+
+
+ -display vnc
+
+
+ Refer to for more
+ information.
+
+
+
+
+ -nographic
+
+
+ Disables &qemu;'s graphical output. The emulated serial port is
+ redirected to the console.
+
+
+ After starting the virtual machine with
+ -nographic, press
+ A
+ H in the virtual console to view the list of
+ other useful shortcuts, for example, to toggle between the
+ console and the &qemu; monitor.
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw -nographic
+
+C-a h print this help
+C-a x exit emulator
+C-a s save disk data back to file (if -snapshot)
+C-a t toggle console timestamps
+C-a b send break (magic sysrq)
+C-a c switch between console and monitor
+C-a C-a sends C-a
+(pressed C-a c)
+
+&qemu; 2.3.1 monitor - type 'help' for more information
+(qemu)
+
+
+
+ -no-frame
+
+
+ Disables decorations for the &qemu; window. Convenient for
+ a dedicated desktop workspace.
+
+
+
+
+ -full-screen
+
+
+ Starts &qemu; graphical output in full-screen mode.
+
+
+
+
+ -no-quit
+
+
+ Disables the close button of the &qemu; window and prevents it
+ from being closed by force.
+
+
+
+
+ -alt-grab, -ctrl-grab
+
+
+ By default, the &qemu; window releases the
+ captured mouse after pressing
+ .
+ You can change the key combination to either
+
+ (-alt-grab), or the right
+ key
+ (-ctrl-grab).
+
+
+
+
+
+
+
+
+ USB devices
+
+ There are two ways to create USB devices usable by the &vmguest; in
+ &kvm;: you can either emulate new USB devices inside a &vmguest;, or
+ assign an existing host USB device to a &vmguest;. To use USB devices
+ in &qemu; you first need to enable the generic USB driver with the
+ option. Then you can specify individual devices
+ with the option.
+
+
+ Emulating USB devices in &vmguest;
+
+ &suse; currently supports the following types of USB devices:
+ disk, host,
+ serial, braille,
+ net, mouse, and
+ tablet.
+
+
+ Types of USB devices for the -usbdevice option
+
+ disk
+
+
+ Emulates a mass storage device based on a file. The optional
+ format option is used rather than detecting
+ the format.
+
+&prompt.sudo;qemu-system-x86_64 [...] -usbdevice
+ disk:format=raw:/virt/usb_disk.raw
+
+
+
+ host
+
+
+ Pass through the host device (identified by bus.addr).
+
+
+
+
+ serial
+
+
+ Serial converter to a host character device.
+
+
+
+
+ braille
+
+
+ Emulates a braille device using BrlAPI to display the braille
+ output.
+
+
+
+
+ net
+
+
+ Emulates a network adapter that supports CDC Ethernet and RNDIS
+ protocols.
+
+
+
+
+ mouse
+
+
+ Emulates a virtual USB mouse. This option overrides the default
+ PS/2 mouse emulation. The following example shows the hardware
+ status of a mouse on a &vmguest; started with
+ qemu-system-ARCH [...] -usbdevice mouse:
+
+&prompt.sudo;hwinfo --mouse
+20: USB 00.0: 10503 USB Mouse
+[Created at usb.122]
+UDI: /org/freedesktop/Hal/devices/usb_device_627_1_1_if0
+[...]
+Hardware Class: mouse
+Model: "Adomax &qemu; USB Mouse"
+Hotplug: USB
+Vendor: usb 0x0627 "Adomax Technology Co., Ltd"
+Device: usb 0x0001 "&qemu; USB Mouse"
+[...]
+
+
+
+ tablet
+
+
+ Emulates a pointer device that uses absolute coordinates (such
+ as a touchscreen). This option overrides the default PS/2 mouse
+ emulation. The tablet device is useful if you are viewing
+ a &vmguest; via the VNC protocol. See
+ for more information.
+
+
+
+
+
+
+
+
+ Character devices
+
+ Use -chardev to create a new character device. The
+ option uses the following general syntax:
+
+qemu-system-x86_64 [...] -chardev BACKEND_TYPE,id=ID_STRING
+
+ where BACKEND_TYPE can be one of
+ null, socket,
+ udp, msmouse,
+ vc, file,
+ pipe, console,
+ serial, pty,
+ stdio, braille,
+ tty, or parport. All character
+ devices must have a unique identification string up to 127 characters
+ long. It is used to identify the device in other related directives.
+ For the complete description of all back-end's suboptions, see the
+ man page (man 1 qemu). A brief description of the
+ available back-ends follows:
+
+
+
+ null
+
+
+ Creates an empty device that outputs no data and drops any data
+ it receives.
+
+
+
+
+ stdio
+
+
+ Connects to &qemu;'s process standard input and standard output.
+
+
+
+
+ socket
+
+
+ Creates a two-way stream socket. If
+ PATH is specified, a Unix socket is
+ created:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev \
+socket,id=unix_socket1,path=/tmp/unix_socket1,server
+
+ The SERVER suboption specifies that
+ the socket is a listening socket.
+
+
+ If PORT is specified, a TCP socket is
+ created:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev \
+socket,id=tcp_socket1,host=localhost,port=7777,server,nowait
+
+ The command creates a local listening (server)
+ TCP socket on port 7777. &qemu; does not block waiting for a
+ client to connect to the listening port
+ (nowait).
+
+
+
+
+ udp
+
+
+ Sends all network traffic from the &vmguest; to a remote host over
+ the UDP protocol.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev udp,id=udp_fwd,host=&wsIVname;,port=7777
+
+ The command binds port 7777 on the remote host &wsIVname; and
+ sends &vmguest; network traffic there.
+
+
+
+
+ vc
+
+
+ Creates a new &qemu; text console. You can optionally specify the
+ dimensions of the virtual console:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev vc,id=vc1,width=640,height=480 \
+-mon chardev=vc1
+
+ The command creates a new virtual console called
+ vc1 of the specified size, and connects the
+ &qemu; monitor to it.
+
+
+
+
+ file
+
+
+ Logs all traffic from the &vmguest; to a file on the &vmhost;. The
+ path is required and is automatically created
+ if it does not exist.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev file,id=qemu_log1,path=/var/log/qemu/guest1.log
+
+
+
+
+ By default, &qemu; creates a set of character devices for serial and
+ parallel ports, and a special console for &qemu; monitor. However, you
+ can create your own character devices and use them for the mentioned
+ purposes. The following options may help you:
+
+
+
+ -serial CHAR_DEV
+
+
+ Redirects the &vmguest;'s virtual serial port to a character
+ device CHAR_DEV on &vmhost;. By
+ default, it is a virtual console (vc) in
+ graphical mode, and stdio in non-graphical
+ mode. The -serial understands many
+ suboptions. See the man page man 1 qemu
+ for a complete list of them.
+
+
+ You can emulate up to four serial ports. Use -serial
+ none to disable all serial ports.
+
+
+
+
+ -parallel DEVICE
+
+
+ Redirects the &vmguest;'s parallel port to a
+ DEVICE. This option supports the same
+ devices as -serial.
+
+
+
+ With
+ &sls;&opensuse;
+ Leap as a &vmhost;, you can directly use the hardware
+ parallel port devices /dev/parportN where
+ N is the number of the port.
+
+
+
+ You can emulate up to three parallel ports. Use
+ -parallel none to disable all parallel ports.
+
+
+
+
+ -monitor CHAR_DEV
+
+
+ Redirects the &qemu; monitor to a character device
+ CHAR_DEV on the &vmhost;. This option
+ supports the same devices as -serial. By
+ default, it is a virtual console (vc) in
+ graphical mode, and stdio in non-graphical
+ mode.
+
+
+
+
+
+ For a complete list of available character device back-ends, see the
+ man page (man 1 qemu).
+
+
+
+
+ Networking in &qemu;
+
+
+ Use the -netdev option in combination with
+ to define a specific type of networking and a
+ network interface card for your &vmguest;. The syntax for the
+ option is
+
+
+-netdev type[,prop[=value][,...]]
+
+
+ Currently, &suse; supports the following network types:
+ user, bridge, and
+ tap. For a complete list of -netdev
+ suboptions, see the man page (man 1 qemu).
+
+
+
+ Supported -netdev suboptions
+
+ bridge
+
+
+ Uses a specified network helper to configure the TAP interface and
+ attach it to a specified bridge. For more information, see
+ .
+
+
+
+
+ user
+
+
+ Specifies user-mode networking. For more information, see
+ .
+
+
+
+
+ tap
+
+
+ Specifies bridged or routed networking. For more information, see
+ .
+
+
+
+
+
+
+ Defining a network interface card
+
+ Use -netdev together with the related
+ option to add a new emulated network card:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev tap,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,vlan=1,\
+macaddr=&wsIVmac;,name=ncard1
+
+
+
+ Specifies the network device type.
+
+
+
+
+ Specifies the model of the network card. Use
+ qemu-system-ARCH -device help and search for the
+ Network devices: section to get the list of all
+ network card models supported by &qemu; on your platform.
+
+ cwickert 2017-09-01: still up to date?
+
+ Currently, &suse; supports the models rtl8139,
+ e1000 and its variants
+ e1000-82540em, e1000-82544gc
+ and e1000-82545em, and
+ virtio-net-pci. To view a list of options for a
+ specific driver, add as a driver option:
+
+&prompt.sudo;qemu-system-x86_64 -device e1000,help
+e1000.mac=macaddr
+e1000.vlan=vlan
+e1000.netdev=netdev
+e1000.bootindex=int32
+e1000.autonegotiation=on/off
+e1000.mitigation=on/off
+e1000.addr=pci-devfn
+e1000.romfile=str
+e1000.rombar=uint32
+e1000.multifunction=on/off
+e1000.command_serr_enable=on/off
+
+
+
+ Connects the network interface to VLAN number 1. You can specify
+ your own number—it is mainly useful for identification
+ purposes. If you omit this suboption, &qemu; uses the default 0.
+
+
+
+
+ Specifies the Media Access Control (MAC) address for the network
+ card. It is a unique identifier, and you are advised to always
+ specify it. If not, &qemu; supplies its own default MAC address and
+ creates a possible MAC address conflict within the related VLAN.
+
+
+
+
+
+
+ User-mode networking
+
+ The -netdev user option instructs &qemu; to use
+ user-mode networking. This is the default if no networking mode is
+ selected. Therefore, these command lines are equivalent:
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw -netdev user,id=hostnet0
+
+ This mode is useful for allowing the &vmguest; to access the external
+ network resources, such as the Internet. By default, no incoming
+ traffic is permitted and therefore, the &vmguest; is not visible to
+ other machines on the network. No administrator privileges are required
+ in this networking mode. The user-mode is also useful for doing a
+ network boot on your &vmguest; from a local directory on the &vmhost;.
+
+
+ The &vmguest; allocates an IP address from a virtual DHCP server.
+ &vmhost; (the DHCP server) is reachable at 10.0.2.2, while the IP
+ address range for allocation starts from 10.0.2.15. You can use
+ ssh to connect to &vmhost; at 10.0.2.2, and
+ scp to copy files back and forth.
+
+
+ Command-line examples
+
+ This section shows several examples of how to set up user-mode
+ networking with &qemu;.
+
+
+ Restricted user-mode networking
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,vlan=1,name=user_net1,restrict=yes
+
+
+
+ Specifies user-mode networking.
+
+
+
+
+ Connects to VLAN number 1. If omitted, defaults to 0.
+
+
+
+
+ Specifies a human-readable name of the network stack. Useful
+ when identifying it in the &qemu; monitor.
+
+
+
+
+ Isolates &vmguest;. It then cannot communicate with the &vmhost;
+ and no network packets are routed to the external network.
+
+
+
+
+
+ User-mode networking with a custom IP range
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,net=10.2.0.0/8,host=10.2.0.6,\
+dhcpstart=10.2.0.20,hostname=tux_kvm_guest
+
+
+
+ Specifies the IP address of the network that the &vmguest; sees and
+ optionally the netmask. Default is 10.0.2.0/8.
+
+
+
+
+ Specifies the &vmhost; IP address that the &vmguest; sees. Default
+ is 10.0.2.2.
+
+
+
+
+ Specifies the first of the 16 IP addresses that the built-in
+ DHCP server can assign to a &vmguest;. Default is 10.0.2.15.
+
+
+
+
+ Specifies the host name that the built-in DHCP server assigns
+ to the &vmguest;.
+
+
+
+
+
+ User-mode networking with network-boot and TFTP
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,tftp=/images/tftp_dir,\
+bootfile=/images/boot/pxelinux.0
+
+
+
+ Activates a built-in TFTP (a file transfer protocol with the
+ functionality of a basic FTP) server. The files in the
+ specified directory are visible to a &vmguest; as the root of a
+ TFTP server.
+
+
+
+
+ Broadcasts the specified file as a BOOTP (a network protocol
+ that offers an IP address and the network location of a boot
+ image, often used in diskless workstations) file. When used
+ together with tftp, the &vmguest; can boot
+ via the network from the local directory on the host.
+
+
+
+
+
+ User-mode networking with host port forwarding
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,hostfwd=tcp::2222-:22
+
+ Forwards incoming TCP connections on port 2222 on the host to
+ port 22 (SSH) on the
+ &vmguest;. If sshd is
+ running on the &vmguest;, enter
+
+&prompt.user;ssh qemu_host -p 2222
+
+ where qemu_host is the host name or IP address
+ of the host system, to get an
+ SSH prompt from &vmguest;.
+
+
+
+
+
+
+ Bridged networking
+
+ With the -netdev tap option, &qemu; creates a
+ network bridge by connecting the host TAP network device to a specified
+ VLAN of the &vmguest;. Its network interface is then visible to the rest of
+ the network. This method does not work by default and needs to be
+ explicitly specified.
+
+
+ First, create a network bridge and add a &vmhost; physical network
+ interface to it, such as eth0:
+
+
+
+
+ Start &yastcc; and select
+ SystemNetwork
+ Settings.
+
+
+
+
+ Click Add and select Bridge
+ from the Device Type drop-down box in the
+ Hardware Dialog window. Click
+ Next.
+
+
+
+
+ Choose whether you need a dynamically or statically assigned IP
+ address and fill in the related network settings, if applicable.
+
+
+
+
+ In the Bridged Devices pane, select the Ethernet
+ device to add to the bridge.
+
+
+ Click Next. When asked about adapting an already
+ configured device, click Continue.
+
+
+
+
+ Click OK to apply the changes. Check if the
+ bridge has been created:
+
+&prompt.user;bridge link
+2: eth0 state UP : <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 master br0 \
+ state forwarding priority 32 cost 100
+
+
+
+
+ Connecting to a bridge manually
+
+ Use the following example script to connect the &vmguest; to the newly
+ created bridge interface br0. Several commands in
+ the script are run via the sudo mechanism because
+ they require &rootuser; privileges.
+
+
+ Required software
+
+ To manage a network bridge, you need to have the
+ tunctl package installed.
+
+
+
+#!/bin/bash
+bridge=br0
+tap=$(sudo tunctl -u $(whoami) -b)
+sudo ip link set $tap up
+sleep 1s
+sudo ip link add name $bridge type bridge
+sudo ip link set $bridge up
+sudo ip link set $tap master $bridge
+qemu-system-x86_64 -machine accel=kvm -m 512 -hda /images/sles_base.raw \
+ -netdev tap,id=hostnet0 \
+ -device virtio-net-pci,netdev=hostnet0,vlan=0,macaddr=&wsIVmac;,\
+ ifname=$tap,script=no,downscript=no
+sudo ip link set $tap nomaster
+sudo ip link set $tap down
+sudo tunctl -d $tap
+
+
+
+ Name of the bridge device.
+
+
+
+
+ Prepare a new TAP device and assign it to the user who runs the
+ script. TAP devices are virtual network devices often used for
+ virtualization and emulation setups.
+
+
+
+
+ Bring up the newly created TAP network interface.
+
+
+
+
+ Make a 1-second pause to make sure the new TAP network interface
+ is really up.
+
+
+
+
+ Add the new TAP device to the network bridge
+ br0.
+
+
+
+
+ The ifname= suboption specifies the name of
+ the TAP network interface used for bridging.
+
+
+
+
+ Before qemu-system-ARCH connects to a network
+ bridge, it checks the script and
+ downscript values. If it finds the specified
+ scripts on the &vmhost; file system, it runs the
+ script before it connects to the network
+ bridge and downscript after it exits the
+ network environment. You can use these scripts to set up and tear
+ down the bridged interfaces. By default,
+ /etc/qemu-ifup and
+ /etc/qemu-ifdown are examined. If
+ script=no and downscript=no
+ are specified, the script execution is disabled and you need to
+ take care of it manually.
+
+
+
+
+ Deletes the TAP interface from the network bridge
+ br0.
+
+
+
+
+ Sets the state of the TAP device to down.
+
+
+
+
+ Tear down the TAP device.
+
+
+
+
+
+ Connecting to a bridge with qemu-bridge-helper
+
+ Another way to connect a &vmguest; to a network through a network
+ bridge is via the qemu-bridge-helper helper
+ program. It configures the TAP interface for you and attaches it to
+ the specified bridge. The default helper executable is
+ /usr/lib/qemu-bridge-helper. The helper
+ executable is setuid root, which is only executable by members of
+ the virtualization group (kvm). Therefore, the
+ qemu-system-ARCH command itself does not need to
+ be run under &rootuser; privileges.
+
+
+ The helper is automatically called when you specify a network bridge:
+
+qemu-system-x86_64 [...] \
+ -netdev bridge,id=hostnet0,vlan=0,br=br0 \
+ -device virtio-net-pci,netdev=hostnet0
+
+ You can specify your own custom helper script that takes care of the
+ TAP device (de)configuration, with the
+ option:
+
+qemu-system-x86_64 [...] \
+ -netdev bridge,id=hostnet0,vlan=0,br=br0,helper=/path/to/bridge-helper \
+ -device virtio-net-pci,netdev=hostnet0
+
+
+ To define access privileges to
+ qemu-bridge-helper, inspect the
+ /etc/qemu/bridge.conf file. For example, the
+ following directive
+
+allow br0
+
+ allows the qemu-system-ARCH command to connect
+ its &vmguest; to the network bridge br0.
+
+
+
+
+
+
+ Viewing a &vmguest; with VNC
+
+
+ By default, &qemu; uses a GTK (a cross-platform toolkit library) window to
+ display the graphical output of a &vmguest;.
+
+ 2014-08-06 - fs: feedback from brogers: We should explain the "display
+ xxx" command line syntax to present the various display options before
+ diving into the vnc details.
+
+ With the -vnc option specified, you can make &qemu;
+ listen on a specified VNC display and redirect its graphical output to
+ the VNC session.
+
+
+
+
+ When working with &qemu;'s virtual machine via a VNC session, it is
+ useful to work with the -usbdevice tablet option.
+
+
+ Moreover, if you need to use a keyboard layout other than the default
+ en-us, specify it with the -k
+ option.
+
+
+
+
+ The first suboption of -vnc must be a
+ display value. The -vnc option
+ understands the following display specifications:
+
+
+
+
+ host:display
+
+
+ Only connections from host on the display number
+ display are accepted. The TCP port on which the
+ VNC session is then running is normally a 5900 +
+ display number. If you do not specify
+ a host, connections are accepted from any host.
+
+
+
+
+ unix:path
+
+
+ The VNC server listens for connections on Unix domain sockets. The
+ path option specifies the location of the
+ related Unix socket.
+
+
+
+
+ none
+
+
+ The VNC server functionality is initialized, but the server itself
+ is not started. You can start the VNC server later with the &qemu;
+ monitor. For more information, see
+ .
+
+
+
+
+
+
+ Following the display value, there may be one or more option flags
+ separated by commas. Valid options are:
+
+
+
+
+ reverse
+
+
+ Connect to a listening VNC client via a
+ reverse connection.
+
+
+
+
+ websocket
+
+
+ Opens an additional TCP listening port dedicated to VNC WebSocket
+ connections. By definition the WebSocket port is 5700+display.
+
+
+
+
+ password
+
+
+ Require that password-based authentication is used for client
+ connections.
+
+
+
+
+ tls
+
+
+ Require that clients use TLS when communicating with the VNC
+ server.
+
+
+
+
+ x509=/path/to/certificate/dir
+
+
+ Valid if TLS is specified. Require that x509 credentials are used
+ for negotiating the TLS session.
+
+
+
+
+ x509verify=/path/to/certificate/dir
+
+
+ Valid if TLS is specified. Require that x509 credentials are used
+ for negotiating the TLS session.
+
+
+
+
+ sasl
+
+
+ Require that the client uses SASL to authenticate with the VNC
+ server.
+
+
+
+
+ acl
+
+
+ Turn on access control lists for checking of the x509 client
+ certificate and SASL party.
+
+
+
+
+ lossy
+
+
+ Enable lossy compression methods (gradient, JPEG, ...).
+
+
+
+
+ non-adaptive
+
+
+ Disable adaptive encodings. Adaptive encodings are enabled by
+ default.
+
+
+
+
+ share=[allow-exclusive|force-shared|ignore]
+
+
+ Set the display sharing policy.
+
+
+
+
+
+
+
+ For more details about the display options, see the
+ qemu-doc man page.
+
+
+
+
+ An example of VNC usage:
+
+
+&prompt.sudo;qemu-system-x86_64 [...] -vnc :5
+# (on the client:)
+&prompt.user;vncviewer &wsII;:5 &
+
+
+ &qemu; VNC session
+
+
+
+
+
+
+
+
+
+
+
+ Secure VNC connections
+
+ The default VNC server setup does not use any form of authentication.
+ In the previous example, any user can connect and view the &qemu; VNC
+ session from any host on the network.
+
+
+ There are several levels of security that you can apply to your VNC
+ client/server connection. You can either protect your connection with a
+ password, use x509 certificates, use SASL authentication, or even
+ combine several authentication methods in one &qemu; command.
+
+
+
+ For more information about configuring x509 certificates on a &vmhost;
+ and the client.
+
+
+ The Remmina VNC viewer supports advanced authentication mechanisms. For
+ this example, let us assume that the server x509 certificates
+ ca-cert.pem, server-cert.pem,
+ and server-key.pem are located in the
+ /etc/pki/qemu directory on the host. The client
+ certificates can be placed in any custom directory, as Remmina asks for
+ their path on connection start-up.
+
+
+ Password authentication
+qemu-system-x86_64 [...] -vnc :5,password -monitor stdio
+
+ Starts the &vmguest; graphical output on VNC display number 5, which
+ corresponds to port 5905. The password suboption
+ initializes a simple password-based authentication method. There is
+ no password set by default and you need to set one with the
+ change vnc password command in &qemu; monitor:
+
+&qemu; 2.3.1 monitor - type 'help' for more information
+(qemu) change vnc password
+Password: ****
+
+
+ You need the -monitor stdio option here, because
+ you would not be able to manage the &qemu; monitor without
+ redirecting its input/output.
+
+
+
+ Authentication dialog in Remmina
+
+
+
+
+
+
+
+
+
+
+ x509 certificate authentication
+
+ The &qemu; VNC server can use TLS encryption for the session and x509
+ certificates for authentication. The server asks the client for a
+ certificate and validates it against the CA certificate. Use this
+ authentication type if your company provides an internal certificate
+ authority.
+
+qemu-system-x86_64 [...] -vnc :5,tls,x509verify=/etc/pki/qemu
+
+
+ x509 certificate and password authentication
+
+ You can combine password authentication with TLS encryption and
+ x509 certificate authentication to create a two-layer authentication
+ model for clients. Remember to set the password in the &qemu; monitor
+ after you run the following command:
+
+qemu-system-x86_64 [...] -vnc :5,password,tls,x509verify=/etc/pki/qemu \
+-monitor stdio
+
+
+ SASL authentication
+
+ Simple Authentication and Security Layer (SASL) is a framework for
+ authentication and data security in Internet protocols. It integrates
+ several authentication mechanisms, like PAM, Kerberos, LDAP and more.
+ SASL keeps its own user database, so the connecting user accounts do
+ not need to exist on &vmhost;.
+
+
+ For security reasons, you are advised to combine SASL authentication
+ with TLS encryption and x509 certificates:
+
+qemu-system-x86_64 [...] -vnc :5,tls,x509,sasl -monitor stdio
+
+
+
+
diff --git a/references/sle16-upgrade-distribution-migration-syste-configuration.xml b/references/sle16-upgrade-distribution-migration-syste-configuration.xml
index 642c5f06a..310a9e22e 100644
--- a/references/sle16-upgrade-distribution-migration-syste-configuration.xml
+++ b/references/sle16-upgrade-distribution-migration-syste-configuration.xml
@@ -64,23 +64,22 @@
Specify Migration Product
- By default, the system will be migrated to SLES15 SP3. This default
- target can be changed through the migration_product setting.
+ By default, the system will be migrated to &slsa; 16.0. This default target can be changed
+ through the migration_product setting.
The product must be specified with the triplet name/version/arch
found in /etc/products.d/baseproduct of the target product,
for example:
-migration_product: SLES/15.3/x86_64
+migration_product: SLES/16.0/x86_64
Changing the default product leads to unsupported territory and
- is not tested nor covered by the SUSE support offering.
+ is not tested nor covered by the &suse; support offering.
The specified product name must be supported by the repository
server used for the migration. If the given product does not
exist or the repository server cannot calculate an upgrade
path, an error message from the repository server will be
- logged in the migration log file. Also see:
- Lifecycle and support.
+ logged in the migration log file.
diff --git a/references/virt-components-installation-patterns.xml b/references/virt-components-installation-patterns.xml
index e4f5a7dc3..bfe98779b 100644
--- a/references/virt-components-installation-patterns.xml
+++ b/references/virt-components-installation-patterns.xml
@@ -26,8 +26,10 @@
You can install the virtualization tools required to run a &vmhost; either when installing the
- system (see the manual
+ installationmanual
installation), or from an alerady installed system by installing a virtualization pattern. The later
option is described bellow:
diff --git a/references/virtualization-io.xml b/references/virtualization-io.xml
new file mode 100644
index 000000000..b0930f271
--- /dev/null
+++ b/references/virtualization-io.xml
@@ -0,0 +1,171 @@
+
+
+ %entities;
+]>
+
+
+ I/O virtualization
+
+
+
+
+ yes
+
+
+
+
+ &vmguest;s not only share CPU and memory resources of the host system, but
+ also the I/O subsystem. Because software I/O virtualization techniques
+ deliver less performance than bare metal, hardware solutions that deliver
+ almost native performance have been developed recently.
+ &productname; supports the following I/O virtualization techniques:
+
+
+
+
+ Full virtualization
+
+
+ Fully Virtualized (FV) drivers emulate widely supported real devices,
+ which can be used with an existing driver in the &vmguest;. The guest is
+ also called Hardware Virtual Machine (HVM).
+ Since the physical device on the &vmhost; may differ from the emulated
+ one, the hypervisor needs to process all I/O operations before handing
+ them over to the physical device. Therefore all I/O operations need to
+ traverse two software layers, a process that not only significantly
+ impacts I/O performance, but also consumes CPU time.
+
+
+
+
+ Paravirtualization
+
+
+ Paravirtualization (PV) allows direct communication between the hypervisor
+ and the &vmguest;. With less overhead involved, performance is much better
+ than with full virtualization. However, paravirtualization requires either
+ the guest operating system to be modified to support the
+ paravirtualization API, or availability of paravirtualized drivers. For a list of guest operating systems supporting paravirtualization, refer to
+ the section Availability of paravirtualized drivers in the article Virtualization
+ Limits and Support.
+
+
+
+
+ PVHVM
+
+
+ This type of virtualization enhances HVM (see
+ ) with paravirtualized (PV) drivers, and PV
+ interrupt and timer handling.
+
+
+
+
+ VFIO
+
+
+ VFIO stands for Virtual Function I/O and is a new
+ user-level driver framework for Linux. It replaces the traditional &kvm;
+ &pciback; device assignment. The VFIO driver exposes direct device access
+ to user space in a secure memory Input/Output Memory Management Unit (IOMMU)
+ protected environment.
+ With VFIO, a &vmguest; can directly access hardware devices on the
+ &vmhost; (pass-through), avoiding performance issues caused by emulation
+ in performance-critical paths. This method does not allow sharing
+ devices—each device can only be assigned to a single &vmguest;. VFIO
+ needs to be supported by the &vmhost; CPU, chipset and the BIOS/EFI.
+
+
+ Compared to the legacy &kvm; PCI device assignment, VFIO has the following
+ advantages:
+
+
+
+
+ Resource access is compatible with &uefisecboot;.
+
+
+
+
+ Device is isolated and its memory access protected.
+
+
+
+
+ Offers a user-space device driver with a more flexible device ownership
+ model.
+
+
+
+
+ Is independent of &kvm; technology, and not bound to x86 architecture
+ only.
+
+
+
+
+ In &productname; the USB and PCI pass-through methods of device assignment
+ are considered deprecated and are superseded by the VFIO model.
+
+
+
+
+ SR-IOV
+
+
+ The latest I/O virtualization technique, Single Root I/O Virtualization
+ SR-IOV combines the benefits of the aforementioned
+ techniques—performance and the ability to share a device with
+ several &vmguest;s. SR-IOV requires special I/O devices, that are capable
+ of replicating resources so they appear as multiple separate devices. Each
+ such pseudo device can be directly used by a single guest.
+ However, for network cards, for example, the number of concurrent queues
+ that can be used is limited, potentially reducing performance for the
+ &vmguest; compared to paravirtualized drivers. On the &vmhost;, SR-IOV
+ must be supported by the I/O device, the CPU and chipset, the BIOS/EFI and
+ the hypervisor—.
+
+
+
+
+
+
+ Requirements for VFIO and SR-IOV
+
+ To be able to use the VFIO and SR-IOV features, the &vmhost; needs to
+ fulfill the following requirements:
+
+
+
+
+ IOMMU needs to be enabled in the BIOS/EFI.
+
+
+
+
+ For Intel CPUs, the kernel parameter intel_iommu=on
+ needs to be provided on the kernel command line. For more information, see
+ .
+
+
+
+
+ The VFIO infrastructure needs to be available. This can be achieved by
+ loading the kernel module
+ vfio_pci.
+
+
+
+
+
diff --git a/references/virtualization-modes.xml b/references/virtualization-modes.xml
index 02a24bc6c..c445c5a92 100644
--- a/references/virtualization-modes.xml
+++ b/references/virtualization-modes.xml
@@ -45,7 +45,7 @@
Certain guest operating systems hosted in full virtualization mode can be configured to
use drivers from the SUSE Virtual Machine Drivers Pack (VMDP) instead of drivers
included in the operating system. Running virtual machine drivers improves performance
- on guest operating systems, such as Windows Server 2003.
+ on guest operating systems, such as Windows Server.
diff --git a/references/virtualization-spice-removal.xml b/references/virtualization-spice-removal.xml
new file mode 100644
index 000000000..1d8ef266b
--- /dev/null
+++ b/references/virtualization-spice-removal.xml
@@ -0,0 +1,156 @@
+
+
+ %entities;
+]>
+
+
+
+ Deprecation of Spice and Migration to VNC in &productname; 16
+
+
+
+ yes
+
+
+
+ Overview of Spice Deprecation
+
+ Removal of Spice in &suselinux; 16
+
+ Starting with &suselinux; 16, support for the Spice remote computing protocol for virtual machines (VMs)
+ has been completely removed in favor of VNC, which is a more universally adopted standard.
+ Consequently, any attempt to start a Spice-based VM on an &productnameshort; 16 host will fail with an error.
+ All existing VMs that were created on &productnameshort; 15 SP7 or earlier and are configured to use Spice for the
+ graphical console must be manually converted to use VNC. Currently, no automated conversion tool
+ is provided, so this process requires manually editing the VM's XML configuration file.
+
+
+
+ The following procedure outlines the steps to convert a VM from Spice to VNC.
+
+
+
+ Ensure the virtual machine is completely shut down before making any changes. This prevents any potential data corruption or configuration mismatches.
+ &prompt.sudo;virsh shutdown VM-NAME
+
+
+ Export the current XML configuration of the VM to a file. This file will be edited in the next steps.
+ &prompt.sudo;virsh dumpxml VM-NAME > VM-NAME.xml
+
+
+ Create a backup of the original XML definition. This allows you to restore the original configuration if something goes wrong during the editing process.
+ &prompt.user;cp VM-NAME.xml VM-NAME-SPICE.xml
+
+
+ Manually edit the VM's XML configuration file to remove all Spice-related elements and
+ modify others to use VNC. For detailed information, see the subsequent sections.
+
+
+
+
+
+ Removing Spice-Specific XML Elements
+ The following XML snippets are specific to the Spice protocol and must be removed from the VM's configuration file. These elements enable features like the Spice agent channel and USB redirection over Spice, which are no longer supported.
+
+ <!-- This channel is for the Spice agent and is no longer needed -->
+ <channel type='spicevmc'>
+ <target type='virtio' name='com.redhat.spice.0'/>
+ <address type='virtio-serial' controller='0' bus='0' port='2'/>
+ </channel>
+
+ <!-- These elements enable USB redirection over Spice and must be removed -->
+ <redirdev bus='usb' type='spicevmc'>
+ <address type='usb' bus='0' port='2'/>
+ </redirdev>
+
+ <redirdev bus='usb' type='spicevmc'>
+ <address type='usb' bus='0' port='3'/>
+ </redirdev>
+
+
+
+ Modifying XML Elements for VNC Compatibility
+ After removing the Spice-specific elements, modify several other XML elements to switch the VM from using Spice to VNC for its graphical console and other devices.
+
+ Graphics and Audio
+ The primary change is to switch the graphics display protocol from Spice to VNC. You should also disable the audio device, as Spice-based audio is no longer supported.
+ Replace the Spice graphics configuration:
+ <graphics type='spice' autoport='yes' listen='127.0.0.1'>
+ with the VNC equivalent:
+ <graphics type='vnc' autoport='yes' listen='127.0.0.1'>
+ Then, replace the Spice audio configuration:
+ <audio id='1' type='spice'/>
+ with:
+ <audio id='1' type='none'/>
+
+
+ QXL Video Device Conversion for Windows VMs
+ The QXL video driver is optimized for the Spice protocol and is commonly used in Windows VMs. For better compatibility with VNC, we recommend replacing it with the more generic virtio video driver.
+ If your VM, especially a Windows guest, uses a QXL video device like this:
+
+ <video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1' primary='yes'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
+ </video>
+
+ Replace the entire <video> block with a simpler virtio model definition:
+
+ <video>
+ <model type="virtio"/>
+ </video>
+
+
+ When you define the VM, &libvirt; will automatically fill in the necessary default values for the virtio video device, so you don't need to specify details like RAM or bus address.
+
+
+
+
+ Applying the new configuration and starting the VM
+ After you have finished editing the XML file and saved your changes, you need to make &libvirt; aware of the new configuration. This is done by first undefining the VM, which removes its old configuration from &libvirt;, and then defining it again using the modified XML file.
+
+
+ Undefine the existing VM configuration and then define it again with the modified XML file:
+ &prompt.sudo;virsh undefine VM-NAME
+ &prompt.sudo;virsh define VM-NAME.xml
+
+
+ Start the VM, which will be using VNC for its graphical console.
+ &prompt.sudo;virsh start VM-NAME
+
+
+
+
+
+ Restoring copy-paste functionality
+ A common feature available with Spice is copy-paste functionality between the host and the guest VM, which is handled by the Spice agent. After migrating to VNC, this functionality will be lost. However, you can restore it by using the &qemu; guest agent.
+
+
+
+ Ensure you have updated gtk-vnc packages on your &productnameshort; 16 host.
+ The versions shipped with the initial release of &productnameshort; 16 may not have support for copy-paste over
+ VNC.
+
+
+
+ Inside the guest VM, ensure the &qemu; guest agent is installed and the corresponding service is enabled to start on boot.
+ &prompt.sudo;systemctl enable qemu-guest-agent
+
+
+ Add a new channel definition to the VM's XML configuration. This channel allows communication with the &qemu; guest agent for features like copy-paste.
+
+ <channel type="qemu-vdagent">
+ <source>
+ <clipboard copypaste="yes"/>
+ </source>
+ <target type="virtio" name="com.redhat.spice.0"/>
+ <address type="virtio-serial" controller="0" bus="0" port="1"/>
+ </channel>
+
+
+ Shut down and restart the VM for the changes to take effect.
+
+
+
+
diff --git a/references/virtualization-support.xml b/references/virtualization-support.xml
index 1194b8956..4b01f0dd9 100644
--- a/references/virtualization-support.xml
+++ b/references/virtualization-support.xml
@@ -18,7 +18,7 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- Virtualization limits and support
+ Virtualization Limits and Support
Virtualization limits and support
@@ -172,7 +172,7 @@
- 4 TiB
+ 8 TiB
@@ -276,7 +276,7 @@
- KVM (&sls; 15 SP6 guest must use UEFI boot)
+ &kvm; (&sls; 15 SP6 guest must use UEFI boot)
@@ -288,7 +288,7 @@
- KVM
+ &kvm;
@@ -320,7 +320,7 @@
- Oracle Linux KVM 7,
+ Oracle Linux &kvm; 7,
8
@@ -393,7 +393,7 @@
- &sls; 15 SP3, 15 SP4, 15 SP5, 15 SP6, 15 SP6
+ &sls; 15 SP3, 15 SP4, 15 SP5, 15 SP6, 15 SP7
@@ -521,8 +521,8 @@
Offline migration scenarios
&suse; supports offline migration, powering off a guest VM, then moving
- it to a host running a different &slea; product, from &slea; 12 to
- &slea; 15 SPX. The following host operating
+ it to a host running a different &productnameshort; product, from &productnameshort; 12 to
+ &productnameshort; 15 SPX. The following host operating
system combinations are fully supported (L3) for migrating guests from
one host to another:
@@ -875,11 +875,9 @@
Confidential Computing
-
- &slsa; 15 SP6 includes kernel patches and tooling to
- enable Intel TDX Confidential Computing technology in the
- product. As this technology is not yet fully ready for a production
- environment, it is provided as a technology preview.
+
+ For information about Confidential computing, refer to the release notes:
+ .
diff --git a/references/vt-acronyms.xml b/references/vt-acronyms.xml
new file mode 100644
index 000000000..1a7ebe669
--- /dev/null
+++ b/references/vt-acronyms.xml
@@ -0,0 +1,442 @@
+
+
+ %entities;
+]>
+
+
+ Acronyms
+
+
+
+
+ yes
+
+
+ ACPI
+
+
+ Advanced Configuration and Power Interface (ACPI) specification provides
+ an open standard for device configuration and power management by the
+ operating system.
+
+
+
+ AER
+
+
+ Advanced Error Reporting
+
+
+ AER is a capability provided by the PCI Express specification which
+ allows for reporting of PCI errors and recovery from some of them.
+
+
+
+ APIC
+
+
+ Advanced Programmable Interrupt Controller (APIC) is a family of
+ interrupt controllers.
+
+
+
+ BDF
+
+
+ Bus:Device:Function
+
+
+ Notation used to succinctly describe PCI and PCIe devices.
+
+
+
+ CG
+
+
+ Control Groups
+
+
+ Feature to limit, account and isolate resource usage (CPU, memory, disk
+ I/O, etc.).
+
+
+
+ EDF
+
+
+ Earliest Deadline First
+
+
+ This scheduler provides weighted CPU sharing in an intuitive way and
+ uses real-time algorithms to ensure time guarantees.
+
+
+
+ EPT
+
+
+ Extended Page Tables
+
+
+ Performance in a virtualized environment is close to that in a native
+ environment. Virtualization does create some overheads, however. These
+ come from the virtualization of the CPU, the
+ , and the I/O devices. In some
+ recent x86 processors AMD and Intel have begun to provide hardware
+ extensions to help bridge this performance gap. In 2006, both vendors
+ introduced their first generation hardware support for x86
+ virtualization with AMD-Virtualization (AMD-V) and Intel® VT-x
+ technologies. Recently Intel introduced its second generation of
+ hardware support that incorporates MMU-virtualization, called Extended
+ Page Tables (EPT). EPT-enabled systems can improve performance compared
+ to using shadow paging for
+ virtualization. EPT increases memory access latencies for a few
+ workloads. This cost can be reduced by effectively using large pages in
+ the guest and the hypervisor.
+
+
+
+ HAP
+
+
+ High Assurance Platform
+
+
+ HAP combines hardware and software technologies to improve workstation
+ and network security.
+
+
+
+ HVM
+
+
+ Hardware Virtual Machine.
+
+
+
+ IOMMU
+
+
+ Input/Output Memory Management Unit
+
+
+ IOMMU (AMD* technology) is a memory management unit
+ () that connects a direct memory
+ access-capable (DMA-capable) I/O bus to the main memory.
+
+
+
+ KSM
+
+
+ Kernel Same Page Merging
+
+
+ KSM allows for automatic sharing of identical memory pages between
+ guests to save host memory. &kvm; is optimized to use KSM if enabled
+ on the &vmhost;.
+
+
+
+ MMU
+
+
+ Memory Management Unit
+
+
+ is a computer hardware component responsible for handling accesses to
+ memory requested by the CPU. Its functions include translation of
+ virtual addresses to physical addresses (that is, virtual memory
+ management), memory protection, cache control, bus arbitration and in
+ simpler computer architectures (especially 8-bit systems) bank
+ switching.
+
+
+
+ PAE
+
+
+ Physical Address Extension
+
+
+ 32-bit x86 operating systems use Physical Address Extension (PAE) mode
+ to enable addressing of more than 4 GB of physical memory. In PAE mode,
+ page table entries (PTEs) are 64 bits in size.
+
+
+
+ PCID
+
+
+ Process-context identifiers
+
+
+ These are a facility by which a logical processor may cache information
+ for multiple linear-address spaces so that the processor may retain
+ cached information when software switches to a different linear address
+ space. The INVPCID instruction is used for fine-grained
+ flush, which is a benefit for the
+ kernel.
+
+
+
+ PCIe
+
+
+ Peripheral Component Interconnect Express
+
+
+ PCIe was designed to replace older PCI, PCI-X and AGP bus standards.
+ PCIe has numerous improvements including a higher maximum system bus
+ throughput, a lower I/O pin count and smaller physical footprint.
+ Moreover it also has a more detailed error detection and reporting
+ mechanism (), and a native
+ hotplug functionality. It is also backward compatible with PCI.
+
+
+
+ PSE and PSE36
+
+
+ Page Size Extended
+
+
+ PSE refers to a feature of x86 processors that allows for pages larger
+ than the traditional 4 KiB size. PSE-36 capability offers 4 more bits,
+ in addition to the normal 10 bits, which are used inside a page
+ directory entry pointing to a large page. This allows a large page to be
+ located in 36-bit address space.
+
+
+
+ PT
+
+
+ Page Table
+
+
+ A page table is the data structure used by a virtual memory system in a
+ computer operating system to store the mapping between virtual addresses
+ and physical addresses. Virtual addresses are those unique to the
+ accessing process. Physical addresses are those unique to the hardware
+ (RAM).
+
+
+
+ QXL
+
+
+ QXL is a Cirrus VGA framebuffer (8M) driver for virtualized environments.
+
+
+
+ RVI or NPT
+
+
+ Rapid Virtualization Indexing, Nested Page Tables
+
+
+ An AMD second generation hardware-assisted virtualization technology for
+ the processor memory management unit
+ ().
+
+
+
+ SATA
+
+
+ Serial ATA
+
+
+ SATA is a computer bus interface that connects host bus adapters to mass
+ storage devices such as hard disks and optical drives.
+
+
+
+ Seccomp2-based sandboxing
+
+
+ Sandboxed environment where only predetermined system calls are
+ permitted for added protection against malicious behavior.
+
+
+
+ SPICE
+
+
+ Simple Protocol for Independent Computing Environments
+
+
+
+ TCG
+
+
+ Tiny Code Generator
+
+
+ Instructions are emulated rather than executed by the CPU.
+
+
+
+ THP
+
+
+ Transparent Huge Pages
+
+
+ This allows CPUs to address memory using pages larger than the default 4
+ KB. This helps reduce memory consumption and CPU cache usage. &kvm;
+ is optimized to use THP (via madvise and opportunistic methods) if
+ enabled on the &vmhost;.
+
+
+
+ TLB
+
+
+ Translation Lookaside Buffer
+
+
+ TLB is a cache that memory management hardware uses to improve virtual
+ address translation speed. All current desktop, notebook, and server
+ processors use a TLB to map virtual and physical address spaces, and it
+ is nearly always present in any hardware that uses virtual memory.
+
+
+
+ VCPU
+
+
+ A scheduling entity, containing each state for virtualized CPU.
+
+
+
+ VDI
+
+
+ Virtual Desktop Infrastructure
+
+
+
+ VFIO
+
+
+ Available since kernel v3.6, VFIO is a new method of accessing PCI
+ devices from user space.
+
+
+
+ VHS
+
+
+ Virtualization Host Server
+
+
+
+ VMCS
+
+
+ Virtual Machine Control Structure
+
+
+ VMX non-root operation and VMX transitions are controlled by a data
+ structure called a virtual-machine control structure (VMCS). Access to
+ the VMCS is managed through a component of processor state called the
+ VMCS pointer (one per logical processor). The value of the VMCS pointer
+ is the 64-bit address of the VMCS. The VMCS pointer is read and written
+ using the instructions VMPTRST and VMPTRLD. The
+ configures a VMCS
+ using the VMREAD, VMWRITE, and VMCLEAR instructions. A
+ could use a different
+ VMCS for each virtual machine that it supports. For a virtual machine
+ with multiple logical processors (virtual processors), the
+ could use a different
+ VMCS for each virtual processor.
+
+
+
+ VMDq
+
+
+ Virtual Machine Device Queue
+
+
+ Multi-queue network adapters exist which support multiple VMs at the
+ hardware level, having separate packet queues associated to the
+ different hosted VMs (by means of the IP addresses of the VMs).
+
+
+
+ VMM
+
+
+ Virtual Machine Monitor (Hypervisor)
+
+
+ When the processor encounters an instruction or event of interest to the
+ Hypervisor (), it exits
+ from guest mode back to the VMM. The VMM emulates the instruction or
+ other event, at a fraction of native speed, and then returns to guest
+ mode. The transitions from guest mode to the VMM and back again are
+ high-latency operations, during which guest execution is completely
+ stalled.
+
+
+
+ VM root
+
+
+ will run in
+ root operation and guest software
+ will run in non-root operation.
+ Transitions between root
+ operation and non-root operation
+ are called transitions.
+
+
+
+ VMX
+
+
+ Virtual Machine eXtensions
+
+
+
+ VPID
+
+
+ New support for software control of
+ (VPID improves
+ performance with small
+ development effort).
+
+
+
+ VT-d
+
+
+ Virtualization Technology for Directed I/O
+
+
+ Like for
+ Intel*.
+
+
+
+ vTPM
+
+
+ Component to establish end-to-end integrity for guests via Trusted
+ Computing.
+
+
+
+
diff --git a/references/vt-glossary.xml b/references/vt-glossary.xml
new file mode 100644
index 000000000..992273dc5
--- /dev/null
+++ b/references/vt-glossary.xml
@@ -0,0 +1,291 @@
+
+
+ %entities;
+]>
+
+
+ Glossary
+
+
+
+ yes
+
+
+
+ General
+ Virtual Machine Manager
+
+
+ A software program that provides a graphical user interface for
+ creating and managing virtual machines.
+
+
+
+ Virtualized
+
+
+ A guest operating system or application running on a virtual machine.
+
+
+
+ Virtual Machine
+
+
+ A virtualized PC environment (VM) capable of hosting a guest
+ operating system and associated applications. Could be also called a
+ &vmguest;.
+
+
+
+ VHS
+
+
+ Virtualization Host Server
+
+
+ The physical computer running a &suse; virtualization platform
+ software. The virtualization environment consists of the hypervisor,
+ the host environment, virtual machines and associated tools, commands
+ and configuration files. Other commonly used terms include host, Host
+ Computer, Host Machine (HM), Virtual Server (VS), Virtual Machine
+ Host (VMH), and VM Host Server (VHS).
+
+
+
+ hardware-assisted
+
+
+ Intel* and AMD* provide virtualization hardware-assisted technology.
+ This reduces the frequency of VM IN/OUT (fewer VM traps), because
+ software is a major source of overhead, and increases the efficiency
+ (the execution is done by the hardware). Moreover, this reduces the
+ memory footprint, provides better resource control, and allows secure
+ assignment of specific I/O devices.
+
+
+
+ Create Virtual Machine Wizard
+
+
+ Virtual Machine Manager
+ provides a graphical interface to guide you through the steps to
+ create virtual machines. It can also be run in text mode by entering
+ virt-install at a command prompt in the host
+ environment.
+
+
+
+ Host Environment
+
+
+ The desktop or command line environment that allows interaction with
+ the host computer's environment. It provides a command line
+ environment and can also include a graphical desktop, such as &gnome;
+ or IceWM. The host environment runs as a special type of virtual
+ machine that has privileges to control and manage other virtual
+ machines.
+
+
+
+ Hypervisor
+
+
+ The software that coordinates the low-level interaction between
+ virtual machines and the underlying physical computer hardware.
+
+
+
+ Paravirtualized Frame Buffer
+
+
+ The video output device that drives a video display from a memory
+ buffer containing a complete frame of data for virtual machine
+ displays running in paravirtual mode.
+
+
+
+ VirtFS
+
+
+ VirtFS is a new paravirtualized file system interface designed for
+ improving pass-through technologies in the KVM environment. It is
+ based on the VirtIO framework.
+
+
+
+
+
+ CPU
+ CPU capping
+
+
+ Virtual CPU capping allows you to set vCPU capacity to 1–100 percent
+ of the physical CPU capacity.
+
+
+
+ CPU over-commitment
+
+
+ Virtual CPU over-commitment is the ability to assign more virtual
+ CPUs to VMs than the actual number of physical CPUs present in the
+ physical system. This procedure does not increase the overall
+ performance of the system, but may be useful for testing purposes.
+
+
+
+ CPU hotplugging
+
+
+ CPU hotplugging is used to describe the functions of
+ replacing/adding/removing a CPU without shutting down the system.
+
+
+
+ CPU pinning
+
+
+ Processor affinity, or CPU pinning enables the binding and unbinding
+ of a process or a thread to a central processing unit (CPU) or a
+ range of CPUs.
+
+
+
+
+
+ Network
+ Bridged Networking
+
+
+ A type of network connection that lets a virtual machine be
+ identified on an external network as a unique identity that is
+ separate from and unrelated to its host computer.
+
+
+
+ Empty Bridge
+
+
+ A type of network bridge that has no physical network device or
+ virtual network device provided by the host. This lets virtual
+ machines communicate with other virtual machines on the same host but
+ not with the host or on an external network.
+
+
+
+ External Network
+
+
+ The network outside a host's internal network environment.
+
+
+
+ Internal Network
+
+
+ A type of network configuration that restricts virtual machines to
+ their host environment.
+
+
+
+ Local Bridge
+
+
+ A type of network bridge that has a virtual network device but no
+ physical network device provided by the host. This lets virtual
+ machines communicate with the host and other virtual machines on the
+ host. Virtual machines can communicate on an external network through
+ the host.
+
+
+
+ Network Address Translation (NAT)
+
+
+ A type of network connection that lets a virtual machine use the IP
+ address and MAC address of the host.
+
+
+
+ No Host Bridge
+
+
+ A type of network bridge that has a physical network device but no
+ virtual network device provided by the host. This lets virtual
+ machines communicate on an external network but not with the host.
+ This lets you separate virtual machine network communications from
+ the host environment.
+
+
+
+ Traditional Bridge
+
+
+ A type of network bridge that has both a physical network device and
+ a virtual network device provided by the host.
+
+
+
+
+
+ Storage
+ AHCI
+
+
+ The Advanced Host Controller Interface (AHCI) is a technical standard
+ defined by Intel* that specifies the operation of Serial ATA (SATA)
+ host bus adapters in a non-implementation-specific manner.
+
+
+
+ Block Device
+
+
+ Data storage devices, such as CD-ROM drives or disk drives, that move
+ data in the form of blocks. Partitions and volumes are also
+ considered block devices.
+
+
+
+ File-Backed Virtual Disk
+
+
+ A virtual disk based on a file, also called a disk image file.
+
+
+
+ Raw Disk
+
+
+ A method of accessing data on a disk at the individual byte level
+ instead of through its file system.
+
+
+
+ Sparse image file
+
+
+ A disk image file that does not reserve its entire amount of disk
+ space but expands as data is written to it.
+
+
+
+ xvda
+
+
+ The drive designation given to the first virtual disk on a
+ paravirtual machine.
+
+
+
+
+
+
diff --git a/references/vt_io.xml b/references/vt_io.xml
new file mode 100644
index 000000000..7a4f03d9d
--- /dev/null
+++ b/references/vt_io.xml
@@ -0,0 +1,171 @@
+
+
+ %entities;
+]>
+
+
+ I/O virtualization
+
+
+
+
+ yes
+
+
+
+
+ &vmguest;s not only share CPU and memory resources of the host system, but
+ also the I/O subsystem. Because software I/O virtualization techniques
+ deliver less performance than bare metal, hardware solutions that deliver
+ almost native performance have been developed recently.
+ &productname; supports the following I/O virtualization techniques:
+
+
+
+
+ Full virtualization
+
+
+ Fully Virtualized (FV) drivers emulate widely supported real devices,
+ which can be used with an existing driver in the &vmguest;. The guest is
+ also called Hardware Virtual Machine (HVM).
+ Since the physical device on the &vmhost; may differ from the emulated
+ one, the hypervisor needs to process all I/O operations before handing
+ them over to the physical device. Therefore all I/O operations need to
+ traverse two software layers, a process that not only significantly
+ impacts I/O performance, but also consumes CPU time.
+
+
+
+
+ Paravirtualization
+
+
+ Paravirtualization (PV) allows direct communication between the hypervisor
+ and the &vmguest;. With less overhead involved, performance is much better
+ than with full virtualization. However, paravirtualization requires either
+ the guest operating system to be modified to support the
+ paravirtualization API or paravirtualized drivers. See
+ for a list of guest operating systems supporting
+ paravirtualization.
+
+
+
+
+ PVHVM
+
+
+ This type of virtualization enhances HVM (see
+ ) with paravirtualized (PV) drivers, and PV
+ interrupt and timer handling.
+
+
+
+
+ VFIO
+
+
+ VFIO stands for Virtual Function I/O and is a new
+ user-level driver framework for Linux. It replaces the traditional &kvm;
+ &pciback; device assignment. The VFIO driver exposes direct device access
+ to user space in a secure memory
+ () protected environment.
+ With VFIO, a &vmguest; can directly access hardware devices on the
+ &vmhost; (pass-through), avoiding performance issues caused by emulation
+ in performance critical paths. This method does not allow to share
+ devices—each device can only be assigned to a single &vmguest;. VFIO
+ needs to be supported by the &vmhost; CPU, chipset and the BIOS/EFI.
+
+
+ Compared to the legacy &kvm; PCI device assignment, VFIO has the following
+ advantages:
+
+
+
+
+ Resource access is compatible with &uefisecboot;.
+
+
+
+
+ Device is isolated and its memory access protected.
+
+
+
+
+ Offers a user space device driver with more flexible device ownership
+ model.
+
+
+
+
+ Is independent of &kvm; technology, and not bound to x86 architecture
+ only.
+
+
+
+
+ In &productname; the USB and PCI pass-through methods of device assignment
+ are considered deprecated and are superseded by the VFIO model.
+
+
+
+
+ SR-IOV
+
+
+ The latest I/O virtualization technique, Single Root I/O Virtualization
+ SR-IOV combines the benefits of the aforementioned
+ techniques—performance and the ability to share a device with
+ several &vmguest;s. SR-IOV requires special I/O devices, that are capable
+ of replicating resources so they appear as multiple separate devices. Each
+ such pseudo device can be directly used by a single guest.
+ However, for network cards for example the number of concurrent queues
+ that can be used is limited, potentially reducing performance for the
+ &vmguest; compared to paravirtualized drivers. On the &vmhost;, SR-IOV
+ must be supported by the I/O device, the CPU and chipset, the BIOS/EFI and
+ the hypervisor—.
+
+
+
+
+
+
+ Requirements for VFIO and SR-IOV
+
+ To be able to use the VFIO and SR-IOV features, the &vmhost; needs to
+ fulfill the following requirements:
+
+
+
+
+ IOMMU needs to be enabled in the BIOS/EFI.
+
+
+
+
+ For Intel CPUs, the kernel parameter intel_iommu=on
+ needs to be provided on the kernel command line. For more information, see
+ .
+
+
+
+
+ The VFIO infrastructure needs to be available. This can be achieved by
+ loading the kernel module
+ vfio_pci.
+
+
+
+
+
diff --git a/snippets/ha-cluster-restart-warning.xml b/snippets/ha-cluster-restart-warning.xml
index a7845a309..3393391e1 100644
--- a/snippets/ha-cluster-restart-warning.xml
+++ b/snippets/ha-cluster-restart-warning.xml
@@ -10,7 +10,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink">
Cluster restart required
- In this procedure, the script checks whether it is safe to restart the cluster services
+ In this procedure, the setup script checks whether it is safe to restart the cluster services
automatically. If any non-stonith resources are running, the script warns
you to restart the cluster services manually. This allows you to put the cluster into
maintenance mode first to avoid resource downtime. However, be aware that the resources will
diff --git a/tasks/Micro-upgrade-upgrading.xml b/tasks/Micro-upgrade-upgrading.xml
index bb084a51a..e41e020d9 100644
--- a/tasks/Micro-upgrade-upgrading.xml
+++ b/tasks/Micro-upgrade-upgrading.xml
@@ -45,10 +45,10 @@
- SLE Micro 5.5 or 6.0
+ SLE Micro 5.5, 6.0 or &productname; 6.1
- The upgrade to &productnumber; is possible only from the 5.5 or 6.0 versions.
+ The upgrade to &productnumber; is possible only from the 5.5, 6.0 or 6.1 versions.
diff --git a/tasks/agama-manual-installation.xml b/tasks/agama-manual-installation.xml
index 850a98f46..d2da9311f 100644
--- a/tasks/agama-manual-installation.xml
+++ b/tasks/agama-manual-installation.xml
@@ -740,8 +740,8 @@ hostname=&wsI; nameserver=&dnsip;
- Wait for the system installation to complete. It takes around 30 minutes to complete the
- installation.
+ Wait for the system installation to complete. The time of the installation depends on the
+ system's performance, the installation size and your network connection speed.
diff --git a/tasks/copy-file-with-rsyc-configure-rsync-server.xml b/tasks/copy-file-with-rsyc-configure-rsync-server.xml
index 68bc743c5..f8c180b92 100644
--- a/tasks/copy-file-with-rsyc-configure-rsync-server.xml
+++ b/tasks/copy-file-with-rsyc-configure-rsync-server.xml
@@ -50,7 +50,7 @@ in the assembly -->
/etc/rsyncd.conf and add the following
lines:log file = /var/log/rsync.log
-pid file = /var/lock/rsync.lock
+pid file = /var/run/rsync.pid
merge /etc/rsyncd.d
include /etc/rsyncd.d
@@ -61,7 +61,7 @@ include /etc/rsyncd.d
- /var/lock/rsync.lock is the file that
+ /var/run/rsync.pid is the file that
contains the PID of the running rsync daemon instance.
diff --git a/tasks/customizing-products-images.xml b/tasks/customizing-products-images.xml
index 3c645af9e..b20648c2c 100644
--- a/tasks/customizing-products-images.xml
+++ b/tasks/customizing-products-images.xml
@@ -155,7 +155,7 @@ Navigate to the directory with the build definition file.
After you have prepared all necessary files and the directory structure, you can use kiwi-ng to build
the image. To build it directly with kiwi-ng, you need to have the version
- included in &slea; 16.0 or higher. Alternatively, you can use a container.
+ included in &slm; 6.2 or higher. Alternatively, you can use a container.
Then, to build the image, proceed as follows. Make sure to have Podman
@@ -169,7 +169,13 @@ Navigate to the directory with the build definition file.
Or you can use KIWI directly after installing it:
- &prompt.sudo;zypper install python3-kiwi
+ &prompt.sudo;transactional-update pkg install python3-kiwi
+
+
+
+ Reboot the system after the package installation is complete. You can skip this step if
+ you intend to use the container.
+
diff --git a/tasks/deploy-sap-ansible-aws.xml b/tasks/deploy-sap-ansible-aws.xml
new file mode 100644
index 000000000..0415d1def
--- /dev/null
+++ b/tasks/deploy-sap-ansible-aws.xml
@@ -0,0 +1,268 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Deploying a &ha; distributed &s4h; system on AWS
+
+
+
+
+ This is a detailed step-by-step procedure to deploy a &ha; and distributed &s4h; system on Amazon Web Services (AWS).
+ The ansible-sap-playbooks package is used to automate infrastructure provisioning and SAP software installation, creating a resilient, scalable and enterprise-grade environment.
+ This procedure uses the pay-as-you-go (PAYG) model for the &suse; operating system, where the subscription cost is included in the AWS instance pricing.
+
+
+
+ This procedure is intended for system administrators, DevOps engineers and SAP Basis consultants who have a working knowledge of AWS, Ansible and &sap; principles.
+
+ With this procedure, you can:
+
+ Prepare the control node and the AWS cloud environment for automation.
+ Configure Ansible variables for a custom deployment.
+ Execute a single playbook to provision and configure the entire &s4h; system.
+ Achieve a fully functional and highly available system ready for post-installation activities.
+
+
+ The resulting &ha; distributed &s4h; system comprises the following components:
+
+ Two &s4h; scale-up servers in a &ha; cluster
+ Two ASCS/ERS servers in a &ha; cluster
+ One primary application server (PAS)
+ One additional application server (AAS)
+
+ Deployment in the AWS cloud environment
+ Provisioning with Ansible requires a pre-existing environment and a service user with the necessary permissions.
+ Prepare your AWS cloud environment with the following:
+
+ VPC:
+
+ VPC access control list (ACL)
+ VPC Subnets
+ VPC Security Groups
+
+
+ Route 53 (private DNS)
+ Internet Gateway (SNAT)
+ Elastic File System (EFS) Network File System (NFS)
+ Bastion host (AWS EC2 VS)
+ Key Pair for hosts
+
+ For more details on infrastructure prerequisites, refer to the infrastructure prerequisites.
+
+ Create a new IAM role or policy with appropriate access, using one of the following options:
+ Restricted access, refer to the authorization prerequisites.
+ Full access (not recommended):
+ arn:aws:iam::aws:policy/AmazonVPCFullAccess
+ arn:aws:iam::aws:policy/AmazonEC2FullAccess
+ arn:aws:iam::aws:policy/AmazonRoute53FullAccess
+
+
+
+ A bastion host is required when provisioning to a private cloud from outside, but it can be ignored if your control node has direct access to your cloud account and VPC.
+ This is accomplished by setting the variable sap_vm_provision_bastion_execution to false.
+
+
+ Prepare the control node.
+
+
+ Install the sles_sap_automation pattern, which contains all recommended packages for automation.
+ &prompt.sudo; zypper install --type pattern sles_sap_automation
+
+
+ Verify that the pattern and all the packages are installed.
+ &prompt.sudo; zypper info --type pattern sles_sap_automation
+
+ Refreshing service 'SUSE_Linux_Enterprise_Server_for_SAP_Applications_x86_64'.
+ Retrieving repository 'SLE-Product-SLES_SAP-16.0' metadata .........................................................................[done]
+ Building repository 'SLE-Product-SLES_SAP-16.0' cache ..............................................................................[done]
+ Loading repository data...
+ Reading installed packages...
+
+
+ Information for pattern sles_sap_automation:
+ --------------------------------------------
+ Repository : SLE-Product-SLES_SAP-16.0
+ Name : sles_sap_automation
+ Version : 16.0-160000.2.2
+ Arch : x86_64
+ Vendor : SUSE LLC <https://www.suse.com/>
+ Installed : Yes
+ Visible to User : Yes
+ Summary : automation deployment
+ Description :
+ ansible playbooks and roles
+ Contents :
+ S | Name | Type | Dependency
+ ---+------------------------------+---------+-----------
+ i | ansible | package | Required
+ i | ansible-linux-system-roles | package | Required
+ i | ansible-sap-infrastructure | package | Required
+ i | ansible-sap-install | package | Required
+ i | ansible-sap-operations | package | Required
+ i | ansible-sap-playbooks | package | Required
+ i+ | patterns-sap-automation | package | Required
+ i+ | patterns-sap-base_sap_server | package | Required
+
+ Install the AWS requirements, refer to the prerequisites.
+ &prompt.sudo; ansible-galaxy collection install amazon.aws
+ &prompt.sudo; zypper install python313-boto3
+
+
+
+
+Ansible playbooks are installed in /usr/share/ansible/playbooks.
+ To prevent changes from being overwritten by future package updates, these files should not be modified directly.
+For this procedure:
+
+ Use the scenario directory /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_s4hana_distributed_ha.
+ For simplicity, this procedure stores all customizations in the /playbooks directory. Specifically, we will use /playbooks/S01 as the dedicated directory for our system.
+ Define the minimal required variables and leave the predefined in place for a simplified configuration.
+
+
+
+ You can encrypt variables for playbooks in different ways to ensure security compliance.
+
+ Encrypt all variables in the scenario.
+ Split variables for reusability and encrypt only what is needed. For example:
+
+ Cloud credentials and information encrypted in Ansible vault.
+ Scenario passwords encrypted in Ansible vault.
+ Scenario non-sensitive variables without using Ansible vault.
+
+ This procedure uses a simplified variant for encrypting whole files.
+
+
+
+Copy the variable files from the source. Since this procedure provisions new servers in AWS, only two variable files are required:
+ansible_extravars.yml contains scenario-specific variables.
+ ansible_extravars_aws_ec2_vs.yml contains platform-specific variables.
+
+cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_s4hana_distributed_ha/ansible_extravars.yml /playbooks/S01/
+cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_s4hana_distributed_ha/ansible_extravars_aws_ec2_vs.yml /playbooks/S01/
+
+Update the variable file ansible_extravars.yml.
+
+ The following variable defines the provisioning method and platform.
+ sap_vm_provision_iac_type: "ansible"
+
+The following variable selects the desired topology from the sap_vm_provision_aws_ec2_vs_host_specifications_dictionary dictionary.
+
+ Predefined with minimum recommended instances for hosting an &sap; system.
+ Customizable as you can edit an existing entry or define a new plan.
+
+ sap_vm_provision_host_specification_plan: "xsmall_256gb"
+
+ The value of the variable sap_software_product is the key predefined in the sap_software_install_dictionary dictionary.
+ This dictionary follows the same rules as the one above. You can edit an existing entry or create a new one.
+sap_software_product: "sap_s4hana_2023_distributed"
+sap_install_media_detect_source_directory: "/software"
+The default execution ignores the list of &sap; media files in the sap_software_install_dictionary dictionary because the optional package ansible-sap-launchpad is not present.
+ This package is available in Package Hub and it allows you to download SAP software media when provided with valid S-User credentials with appropriate download authorization.
+ If this package is not installed, the playbook expects you to have all required files present in the path defined in the sap_install_media_detect_source_directory directory.
+ The variables used by sap_launchpad role are sap_id_user and sap_id_user_password.
+The password variables are simplified and use the master password. This procedure does not include plain-text passwords, so you must
+ define them when updating the variable files.
+sap_hana_install_use_master_password: "y" # Use a master password for SAP HANA installation (y/n) (String).
+sap_hana_install_master_password: '' # The master password for SAP HANA (String).
+sap_swpm_master_password: '' # Master password for the SAP system (String).
+sap_swpm_ddic_000_password: '' # Password for the DDIC user in client 000 (String).
+sap_swpm_db_schema_abap_password: '' # Password for the ABAP schema user (String).
+sap_swpm_db_sidadm_password: '' # Password for the sid adm user in the database (String).
+sap_swpm_db_system_password: '' # Password for the SYSTEM user in the SAP HANA database (String).
+sap_swpm_db_systemdb_password: '' # Password for the SYSTEM user in the SAP HANA system database (String).
+
+The variables for virtual overlay IP addresses need to follow a specific convention for a given platform.
+AWS: the VIP must be **outside** of any VPC Subnet ranges (e.g. 192.168.100.102/32)
+sap_vm_provision_ha_vip_hana_primary: "ENTER_STRING_VALUE_HERE"
+sap_vm_provision_ha_vip_nwas_abap_ascs: "ENTER_STRING_VALUE_HERE"
+sap_vm_provision_ha_vip_nwas_abap_ers: "ENTER_STRING_VALUE_HERE"
+
+
+
+Update the variable file ansible_extravars_aws_ec2_vs.yml.
+
+
+ The variables for connecting through a bastion host.
+ sap_vm_provision_bastion_user: "ENTER_STRING_VALUE_HERE" # Bastion user name (String).
+ sap_vm_provision_bastion_ssh_port: "ENTER_STRING_VALUE_HERE" # Bastion SSH port (String).
+ sap_vm_provision_bastion_public_ip: "ENTER_STRING_VALUE_HERE" # Public IP of the bastion server (String).
+sap_vm_provision_ssh_bastion_private_key_file_path: "ENTER_STRING_VALUE_HERE" # Path to bastion server's SSH private key on the execution node (String).
+sap_vm_provision_ssh_host_private_key_file_path: "ENTER_STRING_VALUE_HERE" # Path to target host's SSH private key on the execution node (String).
+
+
+ The variables for connecting to a cloud environment.
+ sap_vm_provision_aws_access_key: "ENTER_STRING_VALUE_HERE" # Access key ID (String).
+ sap_vm_provision_aws_secret_access_key: "ENTER_STRING_VALUE_HERE" # Secret access key (String).
+ sap_vm_provision_aws_vpc_availability_zone: "ENTER_STRING_VALUE_HERE" # Availability zone (String).
+ sap_vm_provision_aws_vpc_subnet_id: "ENTER_STRING_VALUE_HERE"
+ sap_vm_provision_dns_root_domain: "ENTER_STRING_VALUE_HERE" # Root domain for DNS entries (e.g., example.com) (String).
+ sap_vm_provision_aws_key_pair_name_ssh_host_public_key: "ENTER_STRING_VALUE_HERE" # SSH Key name in AWS Console (String).
+ sap_vm_provision_aws_vpc_sg_names: "ENTER_STRING_VALUE_HERE" # Comma separated list of AWS VPC Service Group names (String).
+
+
+ The variables for selecting which OS image to use for provisioned instances are defined in the sap_vm_provision_aws_ec2_vs_host_os_image_dictionary
+ dictionary, which defines the search pattern for images. In this procedure, a PAYG image is used.
+ &sles; 16 images will be updated in the dictionary when cloud images are available.
+ sap_vm_provision_aws_ec2_vs_host_os_image: "sles-15-6-sap-ha"
+
+ The scenario specific variables for platform required for configuring &ha;.
+sap_vm_provision_nfs_mount_point: "ENTER_STRING_VALUE_HERE" # e.g. fs-0000.efs.eu-west-2.amazonaws.com:/
+sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "ENTER_STRING_VALUE_HERE" # e.g. fs-0000.efs.eu-west-2.amazonaws.com:/
+sap_vm_provision_nfs_mount_point_type: nfs4 # NFS version (String).
+sap_vm_provision_nfs_mount_point_opts:
+ nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,acl # NFS Mount options (String).
+ sap_ha_pacemaker_cluster_aws_vip_update_rt: "ENTER_STRING_VALUE_HERE" # AWS Routing Table ID (String).
+
+
+
+Encrypt files with Ansible vault. Files can be encrypted either using a prompt or with a password file.
+ This is easier in larger environments for reusability.
+Encryption with a prompt:
+&prompt.sudo;ansible-vault encrypt ansible_extravars.yml ansible_extravars_aws_ec2_vs.yml
+Encryption with a password file, which requires appropriate permissions and protection for security compliance:
+&prompt.sudo;ansible-vault encrypt --vault-password-file password.key ansible_extravars.yml ansible_extravars_aws_ec2_vs.yml
+
+Use the playbook directly from /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_s4hana_distributed_ha without editing it.
+If you need to adjust the playbook, we recommend copying it outside of the package directory.
+Execute the Ansible playbook with the Ansible vault key file.
+
+&prompt.sudo; ansible-playbook /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_s4hana_distributed_ha/ansible_playbook.yml \
+ --extra-vars "@./ansible_extravars.yml" \
+ --extra-vars "@./ansible_extravars_aws_ec2_vs.yml" \
+ --vault-password-file password.key
+
+
+ Outcome
+ You now have a fully operational, &ha; and distributed &s4h; landscape on AWS.
+ Your new environment includes:
+
+ A clustered &hana; database: With automated failover to ensure continuous availability.
+ A clustered ASCS/ERS Instance: Protecting the central services, which are critical for system operation.
+ Multiple application servers: To distribute the workload and provide scalability.
+ Shared storage: For binaries and transport files, accessible across the landscape.
+
+Next steps
+ You can now proceed with post-installation tasks, such as:
+
+ System Validation: Performing health checks and verifying the &ha; functionality.
+ SAP Configuration: Applying initial SAP basis configurations, such as SAP Transport Management System (STMS) and profile parameter tuning.
+ Backup and Recovery: Implementing a robust backup and recovery strategy for the &hana; database and application servers.
+ Monitoring: Integrating the new landscape with your existing monitoring solutions.
+
+
\ No newline at end of file
diff --git a/tasks/deploy-sap-ansible-gcp.xml b/tasks/deploy-sap-ansible-gcp.xml
new file mode 100644
index 000000000..284d1a412
--- /dev/null
+++ b/tasks/deploy-sap-ansible-gcp.xml
@@ -0,0 +1,251 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Deploying a sandbox &sap; BW/4HANA system on GCP
+
+
+
+
+ This is a detailed step-by-step procedure to deploy a non-production sandbox &sap; BW/4HANA system on Google Cloud Platform (GCP).
+ The ansible-sap-playbooks package is used to automate infrastructure provisioning and SAP software installation, resulting in a compact, single-node environment suitable for development, testing or training.
+ This procedure uses the bring-your-own-subscription (BYOS) model for the &suse; operating system.
+
+
+
+ This procedure is intended for system administrators, DevOps engineers and SAP Basis consultants who have a working knowledge of GCP, Ansible and &sap; principles.
+
+ With this procedure, you can:
+
+ Prepare the control node and the GCP cloud environment for automation.
+ Configure Ansible variables for the sandbox deployment.
+ Execute a single playbook to provision and configure the entire &sap; system.
+ Achieve a functional sandbox system ready for post-installation activities.
+
+
+ The resulting &sap; BW/4HANA system consists of a single server with the following components:
+
+ One &hana; database
+ One &sap; ABAP SAP Central Services (ASCS)
+ One primary application server (PAS)
+
+ Deployment on GCP cloud environment
+ Provisioning with Ansible requires a pre-existing environment and a service user with the necessary permissions.
+ Prepare your GCP cloud environment with the following:
+
+ VPC:
+
+ VPC Subnetwork
+
+
+ Compute Firewall
+ Compute Router
+ Cloud NAT (SNAT)
+
+ DNS Managed Zone (Private DNS)
+ Filestore (NFS) or NFS server
+ Bastion host (GCP CE VM)
+
+ For more details on infrastructure prerequisites, refer to the infrastructure prerequisites.
+
+ Create a new IAM role or policy with appropriate restricted access. For details, refer to the
+ authorization prerequisites.
+ A bastion host is required when provisioning to a private cloud from outside, but it can be ignored if your control node has direct access to your cloud account and VPC.
+ This is accomplished by setting the variable sap_vm_provision_bastion_execution to false.
+
+ Prepare the control node.
+
+
+ Install the sles_sap_automation pattern, which contains all recommended packages for automation.
+ &prompt.sudo; zypper install --type pattern sles_sap_automation
+
+
+ Verify that the pattern and all the packages are installed.
+ &prompt.sudo; zypper info --type pattern sles_sap_automation
+
+ Refreshing service 'SUSE_Linux_Enterprise_Server_for_SAP_Applications_x86_64'.
+ Retrieving repository 'SLE-Product-SLES_SAP-16.0' metadata .........................................................................[done]
+ Building repository 'SLE-Product-SLES_SAP-16.0' cache ..............................................................................[done]
+ Loading repository data...
+ Reading installed packages...
+
+
+ Information for pattern sles_sap_automation:
+ --------------------------------------------
+ Repository : SLE-Product-SLES_SAP-16.0
+ Name : sles_sap_automation
+ Version : 16.0-160000.2.2
+ Arch : x86_64
+ Vendor : SUSE LLC <https://www.suse.com/>
+ Installed : Yes
+ Visible to User : Yes
+ Summary : automation deployment
+ Description :
+ ansible playbooks and roles
+ Contents :
+ S | Name | Type | Dependency
+ ---+------------------------------+---------+-----------
+ i | ansible | package | Required
+ i | ansible-linux-system-roles | package | Required
+ i | ansible-sap-infrastructure | package | Required
+ i | ansible-sap-install | package | Required
+ i | ansible-sap-operations | package | Required
+ i | ansible-sap-playbooks | package | Required
+ i+ | patterns-sap-automation | package | Required
+ i+ | patterns-sap-base_sap_server | package | Required
+
+ Install the GCP requirements. For details, refer to the prerequisites.
+ &prompt.sudo; ansible-galaxy collection install google.cloud
+ &prompt.sudo; zypper install python313-google-auth
+
+
+
+
+Ansible playbooks are installed in /usr/share/ansible/playbooks.
+ To prevent changes from being overwritten by future package updates, these files should not be modified directly.
+For this procedure:
+
+ Use the scenario directory /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_bw4hana_sandbox.
+ For simplicity, this procedure stores all customizations in the /playbooks directory. Specifically, we will use /playbooks/B01 as the dedicated directory for our system.
+ Define the minimal required variables and leave the predefined values in place for a simplified configuration.
+
+
+
+ You can encrypt variables for playbooks in different ways to ensure security compliance.
+
+ Encrypt all variables in the scenario.
+ Split variables for reusability and encrypt only what is needed. For example:
+
+ Cloud credentials and information encrypted in Ansible vault.
+ Scenario passwords encrypted in Ansible vault.
+ Scenario non-sensitive variables without using Ansible vault.
+
+ In this procedure, we use a simplified variant for encrypting whole files.
+
+
+
+Copy the variable files from the source. Since we are provisioning new servers in GCP, only two variable files are required:
+ansible_extravars.yml contains scenario-specific variables.
+ ansible_extravars_gcp_ce_vm.yml contains platform-specific variables.
+
+cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_bw4hana_sandbox/ansible_extravars.yml /playbooks/B01/
+
+cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_bw4hana_sandbox/ansible_extravars_gcp_ce_vm.yml /playbooks/B01/
+
+Update the variable file ansible_extravars.yml.
+
+ The following variable defines the provisioning method.
+ sap_vm_provision_iac_type: "ansible"
+
+The following variable selects the desired topology from the sap_vm_provision_gcp_ce_vm_host_specifications_dictionary dictionary.
+
+ Predefined with minimum recommended instances for hosting an &sap; system.
+ Customizable as you can edit an existing entry or define a new plan.
+
+ sap_vm_provision_host_specification_plan: "xsmall_256gb"
+
+ The value of the variable sap_software_product is the key predefined in the sap_software_install_dictionary dictionary.
+ This dictionary follows the same rules as the one above. You can edit an existing entry or create a new one.
+sap_software_product: "sap_bw4hana_2023_sandbox"
+sap_install_media_detect_source_directory: "/software"
+The default execution ignores the list of &sap; media files in the sap_software_install_dictionary dictionary because the optional package ansible-sap-launchpad is not present.
+ This package is available in Package Hub and it allows you to download SAP Media Software when provided with valid S-User credentials with appropriate download authorization.
+ If this package is not installed, the playbook expects you to have all required files present in the path defined in the sap_install_media_detect_source_directory variable.
+ The variables used by sap_launchpad role are sap_id_user and sap_id_user_password.
+
+
+
+
+ The password variables are simplified and use the master password. This procedure does not include plain text passwords, so you must
+ define them when updating the variable files.
+sap_hana_install_use_master_password: "y" # Use a master password for SAP HANA installation (y/n) (String).
+sap_hana_install_master_password: '' # The master password for SAP HANA (String).
+sap_swpm_master_password: '' # Master password for the SAP system (String).
+sap_swpm_ddic_000_password: '' # Password for the DDIC user in client 000 (String).
+sap_swpm_db_schema_abap_password: '' # Password for the ABAP schema user (String).
+sap_swpm_db_sidadm_password: '' # Password for the "sid" adm user in the database (String).
+sap_swpm_db_system_password: '' # Password for the SYSTEM user in the SAP HANA database (String).
+sap_swpm_db_systemdb_password: '' # Password for the SYSTEM user in the SAP HANA system database (String).
+
+
+Update the variable file ansible_extravars_gcp_ce_vm.yml.
+
+
+ The variables for connecting through a bastion host.
+ sap_vm_provision_bastion_user: "ENTER_STRING_VALUE_HERE" # Bastion user name (String).
+ sap_vm_provision_bastion_ssh_port: "ENTER_STRING_VALUE_HERE" # Bastion SSH port (String).
+ sap_vm_provision_bastion_public_ip: "ENTER_STRING_VALUE_HERE" # Public IP of the bastion server (String).
+sap_vm_provision_ssh_bastion_private_key_file_path: "ENTER_STRING_VALUE_HERE" # Path to bastion server's SSH private key on the execution node (String).
+sap_vm_provision_ssh_host_private_key_file_path: "ENTER_STRING_VALUE_HERE" # Path to target host's SSH private key on the execution node (String).
+sap_vm_provision_ssh_host_public_key_file_path: "ENTER_STRING_VALUE_HERE" # Path to bastion public host key on execution node (String).
+
+
+ The variables for connecting to a cloud environment.
+ sap_vm_provision_gcp_credentials_json: "ENTER_STRING_VALUE_HERE" # Path to JSON credentials file (String).
+ sap_vm_provision_gcp_project: "ENTER_STRING_VALUE_HERE" # Project name (String).
+ sap_vm_provision_gcp_region_zone: "ENTER_STRING_VALUE_HERE" # Region zone (String).
+ sap_vm_provision_gcp_vpc_name: "ENTER_STRING_VALUE_HERE" # VPC name (String).
+ sap_vm_provision_gcp_vpc_subnet_name: "ENTER_STRING_VALUE_HERE" # VPC Subnet name (String).
+ sap_vm_provision_dns_root_domain: "ENTER_STRING_VALUE_HERE" # Root domain for DNS entries (e.g., example.com) (String).
+
+
+
+
+ The variables for selecting which OS image to use for provisioned instances are defined in the sap_vm_provision_gcp_ce_vm_host_os_image_dictionary
+ dictionary, which defines the search pattern for images. In this procedure, a BYOS image is used.
+ &sles; 16 images will be updated in the dictionary when cloud images are available.
+ sap_vm_provision_gcp_ce_vm_host_os_image: "sles-15-6-sap-byos"
+ sap_vm_provision_os_online_registration_user: "ENTER_STRING_VALUE_HERE" # User/Email for SUSEConnect registration (String).
+ sap_vm_provision_os_online_registration_passcode: "ENTER_STRING_VALUE_HERE" # Registration code for SUSEConnect registration (String).
+
+ Encrypt files with Ansible vault. Files can be encrypted either using a prompt or with a password file.
+ This is easier in larger environments for reusability.
+Encryption with a prompt:
+&prompt.sudo;ansible-vault encrypt ansible_extravars.yml ansible_extravars_gcp_ce_vm.yml
+Encryption with a password file, which requires appropriate permissions and protection for security compliance:
+&prompt.sudo;ansible-vault encrypt --vault-password-file password.key ansible_extravars.yml ansible_extravars_gcp_ce_vm.yml
+
+Use the playbook directly from /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_bw4hana_sandbox without editing it.
+If you need to adjust the playbook, we recommend copying it outside of the package directory.
+Execute the Ansible playbook with the Ansible vault key file.
+
+&prompt.sudo;ansible-playbook /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_bw4hana_sandbox/ansible_playbook.yml \
+ --extra-vars "@./ansible_extravars.yml" \
+ --extra-vars "@./ansible_extravars_gcp_ce_vm.yml" \
+ --vault-password-file password.key
+
+
+ Outcome
+ You now have a fully operational, non-production sandbox &sap; BW/4HANA system running on a single virtual machine in GCP.
+ Your new environment includes:
+
+ A Single GCP Compute Engine VM: Hosting all SAP instances.
+ &hana; Database: The underlying database for your BW/4HANA system.
+ &sap; ASCS and PAS Instances: The central services and primary application server are installed on the same host.
+
+Next steps
+ You can now proceed with post-installation tasks, such as:
+
+ System Validation: Perform basic health checks to ensure the SAP system is running correctly.
+ Development and Prototyping: Begin developing and testing new data models and reports in a safe, isolated environment.
+ &sap; Configuration: Apply any necessary post-installation configurations or client copies.
+ Learning and Exploration: Use the system to explore the features of SAP BW/4HANA.
+
+
+
\ No newline at end of file
diff --git a/tasks/deploy-sap-ansible-hosts.xml b/tasks/deploy-sap-ansible-hosts.xml
new file mode 100644
index 000000000..13ff307c4
--- /dev/null
+++ b/tasks/deploy-sap-ansible-hosts.xml
@@ -0,0 +1,264 @@
+
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Deploying a sandbox &sap; ECC on &hana; system on existing hosts
+
+
+
+
+ This is a detailed step-by-step procedure to deploy a non-production sandbox &sap; ECC on &hana; system on existing hosts.
+ The ansible-sap-playbooks package is used to automate infrastructure provisioning and SAP software installation, resulting in a compact, single-node environment suitable for development, testing or training.
+
+
+
+ This procedure is intended for system administrators, DevOps engineers and SAP Basis consultants who have a working knowledge of Linux, Ansible and &sap; principles.
+
+ With this procedure, you can:
+
+ Prepare the control node and existing hosts.
+ Configure Ansible variables for the sandbox deployment.
+ Execute a single playbook to provision and configure the entire &sap; system.
+ Achieve a fully functional system ready for post-installation activities.
+
+ The resulting &sap; ECC system consists of a single server with the following components:
+
+ One &hana; database
+ One &sap; ABAP SAP Central Services (ASCS)
+ One primary application server (PAS)
+
+
+
+ Deployment on existing hosts
+ This procedure assumes you have an existing server or a virtual machine on which you will install the &sap; ECC sandbox system.
+ You must provide this host and ensure it is ready for Ansible automation. You must also establish a secure and
+ passwordless SSH connection from your Ansible control node to the target host.
+ Create an Ansible inventory file.
+
+Ansible needs to know which host(s) to target. Copy the predefined inventory /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox/optional/ansible_inventory_noninteractive.yml
+to your working directory /playbooks/E01/.
+
+
+Update the file with the connection details of your hosts. For example:
+
+ ---
+
+ hana_primary:
+ hosts:
+ sap-s4hana:
+ ansible_host: 192.168.1.100
+ ansible_port: 22
+ ansible_user: root
+ ansible_ssh_private_key_file: "/playbook/E01/hosts_rsa"
+
+This group name is used in the example playbook and renaming it would require customizing the playbook to change all hosts: entries.
+
+
+
+Configure passwordless SSH authentication.
+For automation, the Ansible control node must be able to connect to the target host as &rootuser; without a password prompt.
+This is achieved using SSH keys.
+For simplicity, this procedure uses &rootuser; login, which is acceptable for a temporary sandbox environment.
+For production systems, using a dedicated, non-root user is the recommended security practice.
+
+
+Generate an SSH key if you do not have one. Press Enter to accept the default location and leave the passphrase empty for passwordless access.
+&prompt.root; ssh-keygen -t rsa
+
+Copy the SSH public key to the target host.
+The easiest way to copy your public key to the target host's authorized_keys file is using the ssh-copy-id utility.
+This command prompts you for the root password of the target host one last time.
+&prompt.root; ssh-copy-id root@192.168.1.100
+
+
+
+Verify that Ansible can successfully connect to and gather facts from your target host.
+&prompt.sudo; ansible -i ansible_inventory_noninteractive.yml all -m ping
+A successful connection test shows a ping-pong response.
+
+ 192.168.1.100 | SUCCESS => {
+ "ansible_facts": {
+ "discovered_interpreter_python": "/usr/bin/python3.13"
+ },
+ "changed": false,
+ "ping": "pong"
+}
+
+
+ Prepare the control node.
+
+
+ Install the sles_sap_automation pattern, which contains all recommended packages for automation.
+ &prompt.sudo; zypper install --type pattern sles_sap_automation
+
+
+ Verify that the pattern and all the packages are installed.
+ &prompt.sudo; zypper info --type pattern sles_sap_automation
+
+ Refreshing service 'SUSE_Linux_Enterprise_Server_for_SAP_Applications_x86_64'.
+ Retrieving repository 'SLE-Product-SLES_SAP-16.0' metadata .........................................................................[done]
+ Building repository 'SLE-Product-SLES_SAP-16.0' cache ..............................................................................[done]
+ Loading repository data...
+ Reading installed packages...
+
+
+ Information for pattern sles_sap_automation:
+ --------------------------------------------
+ Repository : SLE-Product-SLES_SAP-16.0
+ Name : sles_sap_automation
+ Version : 16.0-160000.2.2
+ Arch : x86_64
+ Vendor : SUSE LLC <https://www.suse.com/>
+ Installed : Yes
+ Visible to User : Yes
+ Summary : automation deployment
+ Description :
+ ansible playbooks and roles
+ Contents :
+ S | Name | Type | Dependency
+ ---+------------------------------+---------+-----------
+ i | ansible | package | Required
+ i | ansible-linux-system-roles | package | Required
+ i | ansible-sap-infrastructure | package | Required
+ i | ansible-sap-install | package | Required
+ i | ansible-sap-operations | package | Required
+ i | ansible-sap-playbooks | package | Required
+ i+ | patterns-sap-automation | package | Required
+ i+ | patterns-sap-base_sap_server | package | Required
+
+
+
+
+ Ansible playbooks are installed in /usr/share/ansible/playbooks.
+ To prevent changes from being overwritten by future package updates, these files should not be modified directly.
+ For this procedure:
+
+ Use the scenario directory /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox.
+ For simplicity, this procedure stores all customizations in the /playbooks directory. Specifically, we will use /playbooks/E01 as the dedicated directory for our system.
+ Define the minimal required variables and leave the predefined values in place for a simplified configuration.
+ Ansible playbooks for &sap; are designed to skip provisioning when existing hosts are used, but the remaining tasks are all retained.
+ This means that there will be some variables that are part of provisioning, but they can be used even for existing hosts.
+ For example, the sap_vm_provision_existing_hosts_host_specifications_dictionary dictionary will not provision new infrastructure, but it will be used to configure storage if defined.
+
+
+
+ You can encrypt variables for playbooks in different ways to ensure security compliance.
+
+ Encrypt all variables in the scenario.
+ Split variables for reusability and encrypt only what is needed. For example:
+
+ Cloud credentials and information encrypted in Ansible vault.
+ Scenario passwords encrypted in Ansible vault.
+ Scenario non-sensitive variables without using Ansible vault.
+
+ In this procedure, we use a simplified variant for encrypting whole files.
+
+
+
+ Copy the variable files from the source. Since we are using existing hosts, only two variable files are required:
+ ansible_extravars.yml contains scenario-specific variables.
+ optional/ansible_extravars_existing_hosts.yml contains platform-specific variables.
+
+ cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox/ansible_extravars.yml /playbooks/E01/
+
+ cp /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox/optional/ansible_extravars_existing_hosts.yml /playbooks/E01/
+
+ Update the variable file ansible_extravars.yml.
+
+ The following variable defines the provisioning method.
+ sap_vm_provision_iac_type: "ansible"
+
+ The following variable selects the desired topology from the sap_vm_provision_existing_hosts_host_specifications_dictionary dictionary.
+
+ Predefined with minimum recommended instances for hosting an &sap; system.
+ Customizable as you can edit an existing entry or define a new plan.
+
+ sap_vm_provision_host_specification_plan: "xsmall_256gb"
+
+ The value of the variable sap_software_product is the key predefined in the sap_software_install_dictionary dictionary.
+ This dictionary follows the same rules as the one above. You can edit an existing entry or create a new one.
+ sap_software_product: "sap_ecc6_ehp8_hana_sandbox"
+ sap_install_media_detect_source_directory: "/software"
+ The default execution ignores the list of &sap; media files in the sap_software_install_dictionary dictionary because the optional package ansible-sap-launchpad is not present.
+ This package is available in Package Hub and it allows you to download SAP Media Software when provided with valid S-User credentials with appropriate download authorization.
+ If this package is not installed, the playbook expects you to have all required files present in the path defined in the sap_install_media_detect_source_directory variable.
+ The variables used by sap_launchpad role are sap_id_user and sap_id_user_password.
+
+
+
+
+ The password variables are simplified and use the master password. This procedure does not include plain text passwords, so you must
+ define them when updating the variable files.
+ sap_hana_install_use_master_password: "y" # Use a master password for SAP HANA installation (y/n) (String).
+ sap_hana_install_master_password: '' # The master password for SAP HANA (String).
+ sap_swpm_master_password: '' # Master password for the SAP system (String).
+ sap_swpm_ddic_000_password: '' # Password for the DDIC user in client 000 (String).
+ sap_swpm_db_schema_abap_password: '' # Password for the ABAP schema user (String).
+ sap_swpm_db_sidadm_password: '' # Password for the "sid" adm user in the database (String).
+ sap_swpm_db_system_password: '' # Password for the SYSTEM user in the SAP HANA database (String).
+ sap_swpm_db_systemdb_password: '' # Password for the SYSTEM user in the SAP HANA system database (String).
+
+ Update the variable file ansible_extravars_existing_hosts.yml.
+
+
+ The variables for connecting to existing hosts.
+# Path to target host's SSH private key on the execution node (String).
+# This SSH key has to be added to authorized_keys on managed node.
+sap_vm_provision_ssh_host_private_key_file_path: "ENTER_STRING_VALUE_HERE"
+ Ansible playbooks for &sap; use the Ansible fact ansible_domain, which requires a valid /etc/hosts configuration on your existing hosts with FQDN to obtain the domain name.
+
+
+
+ Encrypt files with Ansible vault. Files can be encrypted either using a prompt or with a password file.
+ This is easier in larger environments for reusability.
+ Encryption with a prompt:
+ &prompt.sudo;ansible-vault encrypt ansible_extravars.yml ansible_extravars_existing_hosts.yml
+ Encryption with a password file, which requires appropriate permissions and protection for security compliance:
+ &prompt.sudo;ansible-vault encrypt --vault-password-file password.key ansible_extravars.yml ansible_extravars_existing_hosts.yml
+
+ Use the playbook directly from /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox without editing it.
+ If you need to adjust the playbook, we recommend copying it outside of the package directory.
+ Execute the Ansible playbook with the Ansible vault key file.
+
+ &prompt.sudo; ansible-playbook /usr/share/ansible/playbooks/ansible.playbooks_for_sap/deploy_scenarios/sap_ecc_hana_sandbox/ansible_playbook.yml \
+ --extra-vars "@./ansible_extravars.yml" \
+ --extra-vars "@./ansible_extravars_existing_hosts.yml" \
+ --vault-password-file password.key \
+ --inventory ansible_inventory_noninteractive.yml
+
+
+
+ Outcome
+ You now have a fully operational, non-production &sap; ECC on &hana; sandbox system installed on your existing host.
+ Your new sandbox environment includes:
+
+ SAP ECC 6.0 EHP8: The core application is installed and ready for use.
+ SAP HANA database: The system is running on an &hana; database.
+ Single-node architecture: The HANA database, ASCS and PAS instances are all running on the provided single host.
+
+ Next steps
+ You can now proceed with post-installation tasks, such as:
+
+ System Validation: Log in to the &sap; GUI, run basic transaction codes like SM50 (Process Overview) or ST22 (ABAP Runtime Errors) to ensure the system is operational.
+ Initial Configuration: Perform any required basis configurations, such as setting up the SAP Transport Management System (STMS) or applying custom profile parameters via RZ10.
+ Development and Customization: Begin ABAP development, or use transaction code SPRO to start system customization.
+ Learning and Exploration: Use the system to explore standard SAP ECC modules and functionality in a safe, isolated environment.
+
+
+
diff --git a/tasks/deployment-network-image-sles.xml b/tasks/deployment-network-image-sles.xml
index 0d800ef84..c9c076090 100644
--- a/tasks/deployment-network-image-sles.xml
+++ b/tasks/deployment-network-image-sles.xml
@@ -40,7 +40,7 @@ in the assembly -->
- Enter the UEFI or legacy BIOS boot menu and select booting using PXE or HTTP. Continue to
+ Enter the boot menu and select booting using PXE or HTTP. Continue to
boot the machine.
diff --git a/tasks/gnome-remote-desktop-configuring.xml b/tasks/gnome-remote-desktop-configuring.xml
index a24967efe..ebb28dd5f 100644
--- a/tasks/gnome-remote-desktop-configuring.xml
+++ b/tasks/gnome-remote-desktop-configuring.xml
@@ -74,7 +74,7 @@
Create a directory for the TLS encryption key and certificate:
-&prompt.sudo;-u gnome-remote-desktop mkdir -p ~/.local/share/gnome-remote-desktop/
+&prompt.sudo;-u gnome-remote-desktop mkdir -p ~gnome-remote-desktop/.local/share/gnome-remote-desktop/
The gnome-remote-desktop user is created
automatically when the gnome-remote-desktop package is installed. Its
@@ -93,8 +93,8 @@
&prompt.sudo;-u gnome-remote-desktop openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj \
/C=COUNTRY_CODE/ST=STATE/L=LOCALITY/O=ORGANIZATION/CN=&exampledomain1; \
- -out ~/.local/share/gnome-remote-desktop/tls.crt \
- -keyout ~/.local/share/gnome-remote-desktop/tls.key
+ -out ~gnome-remote-desktop/.local/share/gnome-remote-desktop/tls.crt \
+ -keyout ~gnome-remote-desktop/.local/share/gnome-remote-desktop/tls.key
Replace the country code, state, locality, organization, and common name or omit
parameters you do not need. For the country code, use a two-letter ISO
@@ -107,9 +107,11 @@
If you prefer an interactive command to guide you through the certificate generation,
use certtool from the gnutls package:
-&prompt.sudo;zypper in gnutls
-&prompt.sudo;-u gnome-remote-desktop certtool --generate-privkey --outfile ~/.local/share/gnome-remote-desktop/tls.key
-&prompt.sudo;-u gnome-remote-desktop certtool --generate-self-signed --load-privkey ~/.local/share/gnome-remote-desktop/tls.key
+&prompt.sudo;zypper in gnutls
+&prompt.sudo;-u gnome-remote-desktop certtool --generate-privkey \
+ --outfile ~gnome-remote-desktop/.local/share/gnome-remote-desktop/tls.key
+&prompt.sudo;-u gnome-remote-desktop certtool --generate-self-signed \
+ --load-privkey ~gnome-remote-desktop/.local/share/gnome-remote-desktop/tls.key
diff --git a/tasks/ha-fencing-creating-resources-for-physical-device.xml b/tasks/ha-fencing-creating-resources-for-physical-device.xml
index 87f8e2be7..fd70e7fdd 100644
--- a/tasks/ha-fencing-creating-resources-for-physical-device.xml
+++ b/tasks/ha-fencing-creating-resources-for-physical-device.xml
@@ -194,7 +194,7 @@
If the fencing resources have the status Stopped, the nodes might
have failed to connect to the fencing device. You can check the connection with the
- command line tool for your specific fence agent. For more information, run the
+ command-line tool for your specific fence agent. For more information, run the
man fence_AGENT command.
diff --git a/tasks/ha-sbd-setting-up-diskbased.xml b/tasks/ha-sbd-setting-up-diskbased.xml
index ef8711ff2..d8a684428 100644
--- a/tasks/ha-sbd-setting-up-diskbased.xml
+++ b/tasks/ha-sbd-setting-up-diskbased.xml
@@ -48,7 +48,7 @@
- The &sbd; service is not running.
+ &sbd; is not configured yet.
diff --git a/tasks/ha-sbd-setting-up-diskless.xml b/tasks/ha-sbd-setting-up-diskless.xml
new file mode 100644
index 000000000..30067af7b
--- /dev/null
+++ b/tasks/ha-sbd-setting-up-diskless.xml
@@ -0,0 +1,141 @@
+
+
+ %entities;
+]>
+
+
+
+
+
+ Setting up diskless &sbd;
+
+
+
+ Diskless &sbd; fences nodes by using only the watchdog, without a shared storage device.
+ However, diskless &sbd; cannot handle a split-brain scenario for a two-node cluster.
+ This configuration should only be used for clusters with more than two nodes, or in
+ combination with &qdevice; to help handle split-brain scenarios.
+
+
+ This procedure explains how to configure &sbd; after the cluster is already installed and
+ running, not during the initial cluster setup.
+
+
+
+
+
+ Cluster restart required
+
+ In this procedure, the setup script has to restart the cluster services before it can modify
+ the stonith-watchdog-timeout. Therefore, if any resources are running, you
+ must put the cluster into maintenance mode before running the script. This allows the services
+ managed by the resources to keep running while the cluster restarts. However, be aware that
+ the resources will not have cluster protection while in maintenance mode.
+
+
+
+
+ Requirements
+
+
+ An existing &ha; cluster is already running.
+
+
+
+
+ &sbd; is not configured yet.
+
+
+
+
+ All nodes have a watchdog device, and the correct watchdog kernel module is loaded.
+
+
+
+
+
+ Perform this procedure on only one cluster node:
+
+
+
+
+ Log in either as the &rootuser; user or as a user with sudo privileges.
+
+
+
+
+ Check whether any resources are running:
+
+&prompt.user;sudo crm status
+
+
+
+ If any resources are running, put the cluster into maintenance mode:
+
+&prompt.user;sudo crm maintenance on
+
+ In this state, the cluster stops monitoring all resources. This allows the services
+ managed by the resources to keep running while the cluster restarts. However, be aware
+ that the resources will not have cluster protection while in maintenance mode.
+
+
+
+
+ Run the &sbd; stage of the cluster setup script, using the option
+ (or ) to specify diskless &sbd;:
+
+&prompt.user;sudo crm cluster init sbd --enable-sbd
+
+ Additional options
+
+
+ If multiple watchdogs are available, you can use the option
+ (or ) to choose which watchdog to use. Specify either the device name
+ (for example, /dev/watchdog1) or the driver name (for example,
+ iTCO_wdt).
+
+
+
+
+ The script updates the &sbd; configuration file and restarts the cluster services, then
+ updates additional timeout settings. Unlike other node fencing mechanisms, diskless &sbd;
+ does not need a fence agent.
+
+
+
+
+ If the cluster is still in maintenance mode, put it back into normal operation:
+
+&prompt.user;sudo crm maintenance off
+
+
+
+ Check the &sbd; configuration:
+
+&prompt.user;sudo crm sbd configure show
+
+
+ The output of this command shows the enabled settings in the
+ /etc/sysconfig/sbd file and the &sbd;-related cluster settings.
+
+
+
+
+ Check the status of &sbd;:
+
+&prompt.user;sudo crm sbd status
+
+ The output of this command shows the type of &sbd; configured, information about the &sbd;
+ watchdog, and the status of the &sbd; service.
+
+
+
+
diff --git a/tasks/install-ansible-role.xml b/tasks/install-ansible-role.xml
index eee4d1504..a18cb0557 100644
--- a/tasks/install-ansible-role.xml
+++ b/tasks/install-ansible-role.xml
@@ -29,7 +29,7 @@ https://documentation.suse.com/sles/15-SP5/html/SLES-all/cha-deployment-prep-uef
Install the roles on a control node:
-&prompt.sudo; zypper install ansible-linux-system-roles
+&prompt.sudo;zypper install ansible-linux-system-roles
The control node is where Ansible and the Linux system roles are installed.
diff --git a/tasks/klp-activate-cli.xml b/tasks/klp-activate-cli.xml
index 6d22e184e..932e2c614 100644
--- a/tasks/klp-activate-cli.xml
+++ b/tasks/klp-activate-cli.xml
@@ -20,7 +20,7 @@
install the following pattern:
- &prompt.root;zypper install -t pattern lp_sles
+ &prompt.root;zypper install -t pattern kernel_livepatching
diff --git a/tasks/networkmanager-nmcli-configure.xml b/tasks/networkmanager-nmcli-configure.xml
index 6a6c63e3d..0d6f79e89 100644
--- a/tasks/networkmanager-nmcli-configure.xml
+++ b/tasks/networkmanager-nmcli-configure.xml
@@ -311,7 +311,7 @@ creation process or beforehand.To configure p2p-dev-wlan0 as a port,
create a connection profile:&prompt.sudo;nmcli connection add type wifi-p2p slave-type bond con-name bond0-port1 ifname p2p-dev-wlan0 master bond0
-A new profile is created for p2p-dev-wlan0 and added to the bond0 connection. The name of the bond is bond0.
+This creates a new profile for p2p-dev-wlan0 and adds it to the bond0 connection. The name of the bond is bond0.To assign virbr0 to a bond:&prompt.sudo;nmcli connection modify virbr0 master bond0The connection profile for virbr0 is added to the bond0 connection.
@@ -582,7 +582,7 @@ but you have not created any bond0 in this section. -->
Create a bridge interface:&prompt.sudo;nmcli connection add type bridge con-name CONNECTION_NAME ifname BRIDGE_NAME
- For example, we created a bridge: bridge0 by running the command:
+ For example, create a bridge bridge0 by running the command:
&prompt.sudo;nmcli connection add type bridge con-name bridge0 ifname bridge0
@@ -606,11 +606,11 @@ connection profile).
Add interfaces to the bridge:&prompt.sudo;nmcli connection add type wifi-p2p slave-type bridge con-name bridge0-port1 ifname p2p-dev-wlan0 master bridge0
-A new profile is created for p2p-dev-wlan0 and added to the bridge0 connection.
+This creates a new profile for p2p-dev-wlan0 and adds it to the bridge0 connection.To assign an existing connection to the bridge:&prompt.sudo;nmcli connection modify bond0 master bridge0
-The connection profile for bond0
- is added to the bridge0 connection.
+This adds the connection profile for bond0
+ to the bridge0 connection.Restart the connection:&prompt.sudo;nmcli connection up bond0
diff --git a/tasks/podman-checkpoint.xml b/tasks/podman-checkpoint.xml
index e0f12b8b1..3d84a19ec 100644
--- a/tasks/podman-checkpoint.xml
+++ b/tasks/podman-checkpoint.xml
@@ -33,7 +33,7 @@ in the assembly -->
On &productname;, the podman container checkpoint is
not available. To checkpoint a container, use the &cockpit; Web interface
as described in the Further actions
+xlink:href="https://documentation.suse.com/sle-micro/6.2/html/Micro-cockpit/index.html#further-actions-with-containers">Further actions
with running containers section.
diff --git a/tasks/post-installation-troubleshooting-agama-automated-installation.xml b/tasks/post-installation-troubleshooting-agama-automated-installation.xml
index 58b2e1100..a071385bf 100644
--- a/tasks/post-installation-troubleshooting-agama-automated-installation.xml
+++ b/tasks/post-installation-troubleshooting-agama-automated-installation.xml
@@ -18,7 +18,7 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- Post installation troubleshooting of automated installation using &agama;
diff --git a/tasks/selinux-packages.xml b/tasks/selinux-packages.xml
index 34fc1ac6e..a9553aa9b 100644
--- a/tasks/selinux-packages.xml
+++ b/tasks/selinux-packages.xml
@@ -68,6 +68,8 @@
Advanced policy development requires additional -devel
packages, which can be installed by:
+
+
&prompt.sudo;zypper install selinux-policy-devel policycoreutils-devel
diff --git a/tasks/sle16-upgrade-perform.xml b/tasks/sle16-upgrade-perform.xml
index 453a08d30..7fa2f76cf 100644
--- a/tasks/sle16-upgrade-perform.xml
+++ b/tasks/sle16-upgrade-perform.xml
@@ -114,7 +114,8 @@
The run_migration utility uses &kexec; and does
not work in &xen;-based environments. If &kexec; causes a kernel
- panic, refer to this TID: https://www.suse.com/support/kb/doc/?id=000019733 and set the
+ panic, refer to the TID at and set the
customization option.
diff --git a/tasks/sles-pxe-server-prepare-installer.xml b/tasks/sles-pxe-server-prepare-installer.xml
index 435aa7a6c..d603418e4 100644
--- a/tasks/sles-pxe-server-prepare-installer.xml
+++ b/tasks/sles-pxe-server-prepare-installer.xml
@@ -101,8 +101,8 @@
to installation repositories during system installation. This corresponds with the
SLES-16.0 Online Installation boot menu entry in GRUB.
-
- Extracting files from Online ISO (x86_64 and aarch64)
+
+ Extracting files from Online ISO
Create the directory structure for installer files:
@@ -117,51 +117,48 @@
- Copy the kernel and initrd files:
+ Copy the kernel files.
+
+
+
+ For &x86-64; and &aarch64;:
+ &prompt.sudo;cp /mnt/boot/ARCH/loader/linux /srv/tftpboot/boot/images/SLES-16.0/ARCH/
-&prompt.sudo;cp /mnt/boot/ARCH/loader/initrd /srv/tftpboot/boot/images/SLES-16.0/ARCH/
-
-
-
- Copy the compressed root file system:
-
-&prompt.sudo;cp /mnt/LiveOS/squashfs.img /srv/tftpboot/boot/images/SLES-16.0/ARCH/
-
-
-
- Unmount the ISO image:
-
-&prompt.sudo;umount /mnt
-
-
-
- Extracting files from Online ISO (ppc64le)
-
-
- Create the directory structure:
-
-&prompt.sudo;mkdir -p /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
-
-
-
- Mount the ISO image:
-
-&prompt.sudo;mount -oro,loop /srv/install/iso/SLES-16.0-Online-ppc64le-BUILD.install.iso /mnt
-
-
-
- Copy the kernel and initrd files (note the different path structure for ppc64le):
-
+
+
+
+ For &ppc64le;:
+ &prompt.sudo;cp /mnt/boot/ppc64le/linux /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
+
+
+
+
+
+ Copy the initrd files.
+
+
+
+
+ For &x86-64; and &aarch64;:
+
+&prompt.sudo;cp /mnt/boot/ARCH/loader/initrd /srv/tftpboot/boot/images/SLES-16.0/ARCH/
+
+
+
+ For &ppc64le;:
+ &prompt.sudo;cp /mnt/boot/ppc64le/initrd /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
+
+
Copy the compressed root file system:
-&prompt.sudo;cp /mnt/LiveOS/squashfs.img /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
+&prompt.sudo;cp /mnt/LiveOS/squashfs.img /srv/tftpboot/boot/images/SLES-16.0/ARCH/
@@ -170,7 +167,7 @@
&prompt.sudo;umount /mnt
-
+
Using Full ISO images
@@ -198,11 +195,42 @@
- Copy the kernel and initrd files (adjust paths for ppc64le as shown in previous
- procedures):
+ Copy the kernel files.
+
+
+
+ For &x86-64; and &aarch64;:
+ &prompt.sudo;cp /mnt/boot/ARCH/loader/linux /srv/tftpboot/boot/images/SLES-16.0/ARCH/
+
+
+
+ For &ppc64le;:
+
+&prompt.sudo;cp /mnt/boot/ppc64le/linux /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
+
+
+
+
+
+
+ Copy the initrd files.
+
+
+
+
+ For &x86-64; and &aarch64;:
+ &prompt.sudo;cp /mnt/boot/ARCH/loader/initrd /srv/tftpboot/boot/images/SLES-16.0/ARCH/
+
+
+
+ For &ppc64le;:
+
+&prompt.sudo;cp /mnt/boot/ppc64le/initrd /srv/tftpboot/boot/images/SLES-16.0/ppc64le/
+
+
diff --git a/tasks/systemd-timer-create.xml b/tasks/systemd-timer-create.xml
index 212ddca37..a8137c17f 100644
--- a/tasks/systemd-timer-create.xml
+++ b/tasks/systemd-timer-create.xml
@@ -64,7 +64,7 @@ Description="Run helloworld.service 5min after boot and every 24 hours relative
[Timer]
OnBootSec=5min
OnUnitActiveSec=24h
-OnCalendar=Mon..Fri *-*-* 10:00
+OnCalendar=Mon..Fri *-*-* 10:00:00
Unit=helloworld.service
[Install]
@@ -140,7 +140,7 @@ Description="Run helloworld.service 5min after boot and every 24 hours relative
[Timer]
OnBootSec=5min
OnUnitActiveSec=24h
-OnCalendar=Mon..Fri *-*-* 10:00
+OnCalendar=Mon..Fri *-*-* 10:00:00
Unit=helloworld.service
[Install]
diff --git a/tasks/systemd-timer-migrate-from-cron.xml b/tasks/systemd-timer-migrate-from-cron.xml
index 6ba0a41e3..721733ccc 100644
--- a/tasks/systemd-timer-migrate-from-cron.xml
+++ b/tasks/systemd-timer-migrate-from-cron.xml
@@ -101,7 +101,7 @@ ExecStart=/usr/local/bin/helloworld.sh
Description="Run helloworld.service 5min after boot and at 10am every Mon-Fri"
[Timer]
OnBootSec=5min
-OnCalendar=Mon..Fri *-*-* 10:00
+OnCalendar=Mon..Fri *-*-* 10:00:00
Unit=helloworld.service
[Install]
WantedBy=multi-user.target
diff --git a/tasks/systemd-timer-troubleshoot.xml b/tasks/systemd-timer-troubleshoot.xml
index 1daf5e451..649264dc1 100644
--- a/tasks/systemd-timer-troubleshoot.xml
+++ b/tasks/systemd-timer-troubleshoot.xml
@@ -216,7 +216,7 @@ ExecStart=/usr/local/bin/helloworld.sh
Description="Run helloworld.service 5min after boot and at 10am every Mon-Fri"
[Timer]
OnBootSec=5min
-OnCalendar=Mon..Fri *-*-* 10:00
+OnCalendar=Mon..Fri *-*-* 10:00:00
Unit=helloworld.service
[Install]
WantedBy=multi-user.target
diff --git a/tasks/systemd.sysv-convert.xml b/tasks/systemd.sysv-convert.xml
new file mode 100644
index 000000000..965a181b3
--- /dev/null
+++ b/tasks/systemd.sysv-convert.xml
@@ -0,0 +1,121 @@
+
+
+ %entities;
+]>
+
+
+
+ Converting SysV init to &systemd;
+
+
+SysV init is the traditional initialization system used by older Linux distributions.
+It is often referred to as init. Converting a legacy SysV init script to a &systemd; service unit involves replacing a procedural shell script with a declarative configuration file.
+In the SysV model, services are managed via scripts in /etc/init.d/ using case statements to handle start, stop and restart actions.
+In &systemd;, unit files are used and located at /etc/systemd/system/ to define the service's behavior through key-value pairs.
+
+
+Open your existing script in /etc/init.d/ and look for the LSB (Linux Standard Base) header.
+ This block contains the configuration you need for your new unit file.
+ Create a new .service file at /etc/systemd/system/ and use the gathered metadata.
+
+Load and test the new service. Before finalizing, tell &systemd; to scan for the new file and then attempt to start it.
+
+Create the .service unit file using the metadata you gathered.
+ Enable the new service at boot:
+ &prompt.sudo; systemctl enable NEW_SERVICE
+ You can now safely remove or move the old script from /etc/init.d/ to prevent confusion.
+
+
+In the following example, boot.local and after.local are used.
+
+ Converting boot.local and after.local to &systemd;
+ The SysV init script files like /etc/init.d/boot.local and /etc/init.d/after.local
+ were used for running custom commands or scripts at the end of the boot process.
+ In a &systemd; environment, to run your own startup script, we recommend creating a custom &systemd; service unit file.
+The boot.local script typically runs very early in the boot process, while after.local runs later, after most system services are up.
+
+
+
+ Create the scripts: Create script files containing the commands you want to run at boot, and make them executable.
+For example:
+
+For boot.local actions, create /usr/local/bin/my-early-boot.sh
+For after.local actions, create /usr/local/bin/my-late-boot.sh
+
+
+
+ Create the &systemd; unit files: Create the .service unit files in the /etc/systemd/system/ directory.
+ For example:
+
+ For boot.local, this service should run before basic system initialization.
+ Create a service file, /etc/systemd/system/my-early-boot.service:
+[Unit]
+ Description=My early startup script
+ DefaultDependencies=no
+ After=local-fs.target
+ Before=basic.target sysinit.target shutdown.target
+
+ [Service]
+ Type=oneshot
+ ExecStart=/usr/local/bin/my-early-boot.sh
+ RemainAfterExit=yes
+
+ [Install]
+ WantedBy=basic.target
+
+
+ Type=oneshot is suitable for scripts that run once and then exit.
+ DefaultDependencies=no and the After/Before settings ensure it runs at an equivalent time to boot.local.
+
+
+
+ For after.local, this service should run later, typically when the multi-user environment is ready.
+If it depends on the network being up, add After=network-online.target. Create /etc/systemd/system/my-late-boot.service:
+[Unit]
+ Description=My late startup script
+ # Add After=network-online.target if your script needs network access
+ After=multi-user.target
+
+ [Service]
+ Type=oneshot
+ ExecStart=/usr/local/bin/my-late-boot.sh
+ RemainAfterExit=yes
+
+ [Install]
+ WantedBy=multi-user.target
+
+
+
+
+
+ Reload the &systemd; manager: After creating the unit files, reload the &systemd; manager configuration so that it detects the new services:
+
+ &prompt.sudo; systemctl daemon-reload
+
+
+ Enable the services: Enable your new services to run at boot:
+
+ &prompt.sudo; systemctl enable my-early-boot.service
+ &prompt.sudo; systemctl enable my-late-boot.service
+
+
+ Verification: To verify that your new services are working as expected:
+ &prompt.sudo; systemctl status my-early-boot.service
+ &prompt.sudo; systemctl status my-late-boot.service
+ The expected output is that both should show Active: inactive (dead) with a successful exit code, because they are Type=oneshot and execute only once.
+
+
+
+
+&systemd; is now the default and only initialization system for
+ &productname; 16.
+
\ No newline at end of file
diff --git a/tasks/using-autoyast-profiles-with-agama.xml b/tasks/using-autoyast-profiles-with-agama.xml
index 872e4d190..bb1cae6c2 100644
--- a/tasks/using-autoyast-profiles-with-agama.xml
+++ b/tasks/using-autoyast-profiles-with-agama.xml
@@ -18,7 +18,7 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:trans="http://docbook.org/ns/transclusion">
- Using Auto&yast; profiles with &agama;
+ Using &ay; profiles with &agama;
@@ -26,26 +26,26 @@
&agama; introduces a modern, declarative installation framework that diverges significantly
- from the legacy Auto&yast; system, even though both aim to automate &productname;
- deployments. While partial reuse of existing Auto&yast; profiles is possible, direct
+ from the legacy &ay; system, even though both aim to automate &productname;
+ deployments. While partial reuse of existing &ay; profiles is possible, direct
compatibility is limited due to schema differences, semantic mismatches, and architectural
- shifts. This topic outlines how to load Auto&yast; profiles in &agama;, identifies
+ shifts. This topic outlines how to load &ay; profiles in &agama;, identifies
supported modules, and offers practical guidance for converting legacy profiles using
recommended tools and conventions.
- Benefits of using Auto&yast; profiles in &agama;
+ Benefits of using &ay; profiles in &agama;
- Reusing existing Auto&yast; profiles in &agama; provides a pragmatic starting point for teams
+ Reusing existing &ay; profiles in &agama; provides a pragmatic starting point for teams
migrating to the new installer without discarding prior investments. Although direct
- compatibility is limited, leveraging Auto&yast; profiles accelerates transition efforts by
+ compatibility is limited, leveraging &ay; profiles accelerates transition efforts by
retaining core configuration logic, organizational conventions, and validated deployment
workflows.
- Using Auto&yast; profiles in &agama; has the following benefits:
+ Using &ay; profiles in &agama; has the following benefits:
@@ -61,7 +61,7 @@
Faster onboarding
- Administrators familiar with Auto&yast; can map known modules to &agama; fields
+ Administrators familiar with &ay; can map known modules to &agama; fields
incrementally.
@@ -70,7 +70,7 @@
Incremental migration
- Supported Auto&yast; elements can be reused while unsupported ones are refactored or
+ Supported &ay; elements can be reused while unsupported ones are refactored or
omitted over time.
@@ -87,22 +87,22 @@
- Limitations of using Auto&yast; profiles in &agama;
+ Limitations of using &ay; profiles in &agama;
- While reusing Auto&yast; profiles in &agama; may provide a head start during migration, it
+ While reusing &ay; profiles in &agama; may provide a head start during migration, it
also introduces significant limitations. The fundamental differences in schema structure,
- execution model, and configuration philosophy mean that Auto&yast;-based profiles can
+ execution model, and configuration philosophy mean that &ay;-based profiles can
constrain the effectiveness and clarity of &agama; workflows if carried over directly.
- Using Auto&yast; profiles in &agama; has the following limitations:
+ Using &ay; profiles in &agama; has the following limitations:
Procedural bias
- Auto&yast; profiles often rely on execution order, embedded scripts, and imperative
+ &ay; profiles often rely on execution order, embedded scripts, and imperative
constructs, which have no counterpart in &agama;’s declarative design.
@@ -111,7 +111,7 @@
Semantic mismatch
- Many Auto&yast; modules encapsulate behavior or assumptions not explicitly modeled in
+ Many &ay; modules encapsulate behavior or assumptions not explicitly modeled in
&agama;, leading to subtle incompatibilities or misconfigurations during reuse.
@@ -120,7 +120,7 @@
Reduced transparency
- Profiles imported from Auto&yast; tend to obscure the declarative simplicity of
+ Profiles imported from &ay; tend to obscure the declarative simplicity of
&agama;, making troubleshooting and peer review harder.
@@ -137,28 +137,28 @@
- Loading Auto&yast; profiles with &agama;
+ Loading &ay; profiles with &agama;
- &agama; supports loading Auto&yast; profiles as part of its transitional support for legacy
+ &agama; supports loading &ay; profiles as part of its transitional support for legacy
automation systems. This allows administrators to reuse existing configuration assets while
gradually migrating to the native &agama; profile format. Several loading mechanisms are
available depending on the deployment context and profile structure.
- Loading an Auto&yast; profile using &agama;
+ Loading an &ay; profile using &agama;
- Use the following steps to load an Auto&yast; profile in &agama;. Profiles can be supplied
+ Use the following steps to load an &ay; profile in &agama;. Profiles can be supplied
either through kernel boot parameters or imported using the &agama; CLI.
- Select a method for providing the Auto&yast; profile to &agama;:
+ Select a method for providing the &ay; profile to &agama;:
Load the profile using a kernel boot parameter. Add the inst.auto
- parameter to the kernel command line and specify the URL of the Auto&yast; profile:
+ parameter to the kernel command line and specify the URL of the &ay; profile:
&prompt.sudo;linux inst.auto=http://EXAMPLE.NET/AGAMA/SLES.xml
@@ -168,7 +168,7 @@
Import the profile using the &agama; CLI. Run the following command to fetch and
- preprocess the Auto&yast; profile:
+ preprocess the &ay; profile:
&prompt.sudo;agama profile import URL
@@ -183,7 +183,7 @@
- Auto&yast; profiles: .xml, .erb, and
+ &ay; profiles: .xml, .erb, and
directories such as rules/ or classes/
@@ -193,7 +193,7 @@
.
- When importing Auto&yast; content, the CLI automatically evaluates dynamic features
+ When importing &ay; content, the CLI automatically evaluates dynamic features
such as:
@@ -218,29 +218,29 @@
- Display the loaded or imported profile, or pip[e it to a JSON file:
+ Display the loaded or imported profile, or pipe it to a JSON file:
&prompt.sudo;agama config show > profile.json
- Best practices for converting Auto&yast; profiles to &agama; profiles
+ Best practices for converting &ay; profiles to &agama; profiles
- Converting Auto&yast; profiles to &agama; profiles involves transforming the original XML
+ Converting &ay; profiles to &agama; profiles involves transforming the original XML
into &agama;'s JSON or Jsonnet format. This procedure outlines the recommended steps using
the &agama; CLI.
- Best practices for converting Auto&yast; profiles to &agama; profiles
+ Best practices for converting &ay; profiles to &agama; profiles
- Convert the Auto&yast; profile to a JSON file by piping the CLI output to a destination
+ Convert the &ay; profile to a JSON file by piping the CLI output to a destination
file:
&prompt.sudo;agama profile autoyast http://EXAMPLE.NET/AUTOYAST.xml > profile.json
- This command fetches and processes the Auto&yast; profile, then writes the resulting
+ This command fetches and processes the &ay; profile, then writes the resulting
&agama;-compatible JSON to the specified file.
@@ -262,7 +262,7 @@
If you require dynamic behavior, convert the profile to Jsonnet. You can then evaluate it
- into JSON:
+ to JSON:
&prompt.sudo;agama profile evaluate profile.jsonnet > profile.json
diff --git a/tasks/valkey-install.xml b/tasks/valkey-install.xml
index 9822452ca..20e904df8 100644
--- a/tasks/valkey-install.xml
+++ b/tasks/valkey-install.xml
@@ -39,7 +39,7 @@ After=network.target
[Service]
User=valkey
Group=valkey
-ExecStart=/usr/sbin/redis-server /etc/redis/valkey.conf
+ExecStart=/usr/bin/valkey-server /etc/valkey/valkey.conf
LimitNOFILE=10240
ExecStop=/usr/bin/valkey-cli shutdown
Restart=always