Compare commits

..

8 Commits

Author SHA1 Message Date
Hyounggyu Choi
be5ae7d1e1 Merge pull request #12573 from BbolroC/support-memory-hotplug-go-runtime-s390x
runtime: Support memory hotplug via virtio-mem on s390x
2026-02-27 09:59:40 +01:00
Steve Horsman
c6014ddfe4 Merge pull request #12574 from sathieu/kata-deploy-kubectl-image
kata-deploy: allow to configure kubectl image
2026-02-27 08:42:06 +00:00
Hyounggyu Choi
b1847f9598 tests: Run TestContainerMemoryUpdate() on s390x only with virtio-mem
Let's run `TestContainerMemoryUpdate` on s390x
only when virtio-mem is enabled.

Signed-off-by: Hyounggyu Choi <Hyounggyu.Choi@ibm.com>
2026-02-26 14:21:34 +01:00
Hyounggyu Choi
b9f3d5aa67 runtime: Support memory hotplug with virtio-mem on s390x
This commit adds logic to properly handle memory hotplug
for QemuCCWVirtio in the ExecMemdevAdd() path.

The new logic is triggered only when virtio-mem is enabled.

Signed-off-by: Hyounggyu Choi <Hyounggyu.Choi@ibm.com>
2026-02-26 14:21:34 +01:00
Hyounggyu Choi
19771671c2 runtime: Handle virtio-mem resize in hotplugAddMemory()
ResizeMemory() already contains the virtio-mem resize logic.
However, hotplugAddMemory(), which is invoked via a different
path, lacked this handling and always fell back to the pc-dimm
path, even when virtio-mem was configured.

This commit adds virtio-mem resize handling to hotplugAddMemory().
It also adds corresponding unit tests.

Signed-off-by: Hyounggyu Choi <Hyounggyu.Choi@ibm.com>
2026-02-26 14:21:34 +01:00
Fabiano Fidêncio
8c91e7889c helm-chart: support digest pinning for images
When image.reference or kubectlImage.reference already contains a digest
(e.g. quay.io/...@sha256:...), use the reference as-is instead of
appending :tag. This avoids invalid image strings like 'image@sha256:abc:'
when tag is empty and allows users to pin by digest.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
2026-02-26 13:39:51 +01:00
Mathieu Parent
b61d169472 kata-deploy: allow to configure kubectl image
This can be used to:

- pin tag (current is 20260112)
- pin digest
- use another image

Signed-off-by: Mathieu Parent <mathieu.parent@insee.fr>
2026-02-26 13:12:03 +01:00
Hyounggyu Choi
2860e68534 kernel: Enable CONFIG_VIRTIO_MEM for s390x
Since QEMU v10.0.0 and Linux v6.13, virtio-mem-ccw is supported.
Let's enable the required kernel configs for s390x.

This commit enables `CONFIG_VIRTIO_MEM` and `CONFIG_MEMORY_HOTREMOVE`
to support memory hotplug in the VM guest.

Signed-off-by: Hyounggyu Choi <Hyounggyu.Choi@ibm.com>
2026-02-25 08:17:48 +01:00
28 changed files with 650 additions and 179 deletions

View File

@@ -47,23 +47,6 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install yq
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
env:

View File

@@ -47,25 +47,8 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install yq
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
run: bash tests/integration/cri-containerd/gha-run.sh
env:
GH_TOKEN: ${{ github.token }}

View File

@@ -82,17 +82,11 @@ jobs:
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
- name: Install golang
if: contains(matrix.component.needs, 'golang')
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
if: contains(matrix.component.needs, 'golang')
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
- name: Setup rust
if: contains(matrix.component.needs, 'rust')
run: |

View File

@@ -31,22 +31,10 @@ jobs:
with:
persist-credentials: false
- name: Install yq
- name: Install golang
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
- name: Install Rust
run: ./tests/install_rust.sh

View File

@@ -24,22 +24,10 @@ jobs:
fetch-depth: 0
persist-credentials: false
- name: Install yq
- name: Install golang
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
- name: Docs URL Alive Check
run: |

View File

@@ -27,22 +27,10 @@ jobs:
fetch-depth: 0
persist-credentials: false
- name: Install yq
- name: Install golang
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
- name: Install govulncheck
run: |

View File

@@ -55,23 +55,6 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install yq
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
- name: Install dependencies
timeout-minutes: 15
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies

View File

@@ -57,24 +57,10 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install yq
- name: Install golang
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
# Setup-go doesn't work properly with ppc64le: https://github.com/actions/setup-go/issues/648
architecture: ${{ contains(inputs.instance, 'ppc64le') && 'ppc64le' || '' }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
- name: Prepare the runner for k8s test suite
run: bash "${HOME}/scripts/k8s_cluster_prepare.sh"

View File

@@ -126,15 +126,11 @@ jobs:
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Read properties from versions.yaml
- name: Install golang
run: |
go_version="$(yq '.languages.golang.version' versions.yaml)"
[ -n "$go_version" ]
echo "GO_VERSION=${go_version}" >> "$GITHUB_ENV"
- name: Setup Golang version ${{ env.GO_VERSION }}
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GO_VERSION }}
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}"
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
- name: Install system dependencies
run: |
sudo apt-get update && sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc

View File

@@ -272,6 +272,7 @@ DEFVIRTIOFSEXTRAARGS ?= [\"--thread-pool-size=1\", \"--announce-submounts\"]
DEFENABLEIOTHREADS := false
DEFINDEPIOTHREADS := 0
DEFENABLEVHOSTUSERSTORE := false
DEFENABLEVIRTIOMEM ?= false
DEFVHOSTUSERSTOREPATH := $(PKGRUNDIR)/vhost-user
DEFVALIDVHOSTUSERSTOREPATHS := [\"$(DEFVHOSTUSERSTOREPATH)\"]
DEFFILEMEMBACKEND := ""
@@ -764,6 +765,7 @@ USER_VARS += DEFENABLEANNOTATIONS
USER_VARS += DEFENABLEANNOTATIONS_COCO
USER_VARS += DEFENABLEIOTHREADS
USER_VARS += DEFINDEPIOTHREADS
USER_VARS += DEFENABLEVIRTIOMEM
USER_VARS += DEFSECCOMPSANDBOXPARAM
USER_VARS += DEFENABLEVHOSTUSERSTORE
USER_VARS += DEFVHOSTUSERSTOREPATH

View File

@@ -18,3 +18,6 @@ ifneq (,$(NEEDS_CC_SETTING))
CC := gcc
export CC
endif
# Enable virtio-mem for s390x
DEFENABLEVIRTIOMEM = true

View File

@@ -142,7 +142,7 @@ memory_offset = 0
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
enable_virtio_mem = false
enable_virtio_mem = @DEFENABLEVIRTIOMEM@
# Disable hotplugging host block devices to guest VMs for container rootfs.
# In case of a storage driver like devicemapper where a container's

View File

@@ -1446,11 +1446,18 @@ func (q *QMP) ExecMemdevAdd(ctx context.Context, qomtype, id, mempath string, si
"memdev": id,
}
if bus != "" {
args["bus"] = bus
}
if addr != "" {
args["addr"] = addr
var transport VirtioTransport
if transport.isVirtioCCW(nil) {
if addr != "" {
args["devno"] = addr
}
} else {
if bus != "" {
args["bus"] = bus
}
if addr != "" {
args["addr"] = addr
}
}
err = q.executeCommand(ctx, "device_add", args, nil)

View File

@@ -92,9 +92,10 @@ const (
)
var (
hvLogger = logrus.WithField("source", "virtcontainers/hypervisor")
noGuestMemHotplugErr error = errors.New("guest memory hotplug not supported")
conflictingAssets error = errors.New("cannot set both image and initrd at the same time")
hvLogger = logrus.WithField("source", "virtcontainers/hypervisor")
noGuestMemHotplugErr error = errors.New("guest memory hotplug not supported")
s390xVirtioMemRequiredErr error = errors.New("memory hotplug on s390x requires virtio-mem to be enabled")
conflictingAssets error = errors.New("cannot set both image and initrd at the same time")
)
// In some architectures the maximum number of vCPUs depends on the number of physical cores.

View File

@@ -996,37 +996,64 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error {
return err
}
addr, bridge, err := q.arch.addDeviceToBridge(ctx, "virtiomem-dev", types.PCI)
if err != nil {
return err
}
defer func() {
if err != nil {
q.arch.removeDeviceFromBridge("virtiomem-dev")
}
}()
bridgeID := bridge.ID
// Hot add virtioMem dev to pcie-root-port for QemuVirt
machineType := q.HypervisorConfig().HypervisorMachineType
if machineType == QemuVirt {
addr = "00"
bridgeID = fmt.Sprintf("%s%d", config.PCIeRootPortPrefix, len(config.PCIeDevicesPerPort[config.RootPort]))
dev := config.VFIODev{ID: "virtiomem"}
config.PCIeDevicesPerPort[config.RootPort] = append(config.PCIeDevicesPerPort[config.RootPort], dev)
var driver, addr, devAddr, bus string
var bridge types.Bridge
if machineType == QemuCCWVirtio {
driver = "virtio-mem-ccw"
addr, bridge, err = q.arch.addDeviceToBridge(ctx, "virtiomem-dev", types.CCW)
if err != nil {
return err
}
defer func() {
if err != nil {
q.arch.removeDeviceFromBridge("virtiomem-dev")
}
}()
devAddr, err = bridge.AddressFormatCCW(addr)
if err != nil {
return err
}
} else {
driver = "virtio-mem-pci"
addr, bridge, err = q.arch.addDeviceToBridge(ctx, "virtiomem-dev", types.PCI)
if err != nil {
return err
}
defer func() {
if err != nil {
q.arch.removeDeviceFromBridge("virtiomem-dev")
}
}()
devAddr = addr
bus = bridge.ID
// Hot add virtioMem dev to pcie-root-port for QemuVirt
if machineType == QemuVirt {
devAddr = "00"
bus = fmt.Sprintf("%s%d", config.PCIeRootPortPrefix, len(config.PCIeDevicesPerPort[config.RootPort]))
dev := config.VFIODev{ID: "virtiomem"}
config.PCIeDevicesPerPort[config.RootPort] = append(config.PCIeDevicesPerPort[config.RootPort], dev)
}
}
err = q.qmpMonitorCh.qmp.ExecMemdevAdd(q.qmpMonitorCh.ctx, memoryBack, "virtiomem", target, sizeMB, share, "virtio-mem-pci", "virtiomem0", addr, bridgeID)
err = q.qmpMonitorCh.qmp.ExecMemdevAdd(q.qmpMonitorCh.ctx, memoryBack, "virtiomem", target, sizeMB, share, driver, "virtiomem0", devAddr, bus)
if err == nil {
q.Logger().Infof("Setup %dMB virtio-mem-pci success", sizeMB)
q.Logger().Infof("Setup %dMB %s success", sizeMB, driver)
} else {
help := ""
if strings.Contains(err.Error(), "Cannot allocate memory") {
help = ". Please use command \"echo 1 > /proc/sys/vm/overcommit_memory\" handle it."
}
err = fmt.Errorf("Add %dMB virtio-mem-pci fail %s%s", sizeMB, err.Error(), help)
err = fmt.Errorf("Add %dMB %s fail %s%s", sizeMB, driver, err.Error(), help)
}
return err
@@ -2206,10 +2233,14 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
}
func (q *qemu) hotplugMemory(memDev *MemoryDevice, op Operation) (int, error) {
if !q.arch.supportGuestMemoryHotplug() {
return 0, noGuestMemHotplugErr
}
if q.HypervisorConfig().HypervisorMachineType == QemuCCWVirtio && !q.config.VirtioMem {
return 0, s390xVirtioMemRequiredErr
}
if memDev.SizeMB < 0 {
return 0, fmt.Errorf("cannot hotplug negative size (%d) memory", memDev.SizeMB)
}
@@ -2245,7 +2276,30 @@ func (q *qemu) hotplugMemory(memDev *MemoryDevice, op Operation) (int, error) {
}
// resizeVirtioMem resizes the virtio-mem device to the specified size in MB
func (q *qemu) resizeVirtioMem(newSizeMB int) error {
if newSizeMB < 0 {
return fmt.Errorf("cannot resize virtio-mem device to negative size (%d) memory", newSizeMB)
}
sizeByte := uint64(newSizeMB) * 1024 * 1024
err := q.qmpMonitorCh.qmp.ExecQomSet(q.qmpMonitorCh.ctx, "virtiomem0", "requested-size", sizeByte)
if err != nil {
q.Logger().WithError(err).Error("failed to resize virtio-mem device")
return err
}
q.state.HotpluggedMemory = newSizeMB
return nil
}
func (q *qemu) hotplugAddMemory(memDev *MemoryDevice) (int, error) {
if q.config.VirtioMem {
newHotpluggedMB := q.state.HotpluggedMemory + memDev.SizeMB
if err := q.resizeVirtioMem(newHotpluggedMB); err != nil {
return 0, err
}
return memDev.SizeMB, nil
}
memoryDevices, err := q.qmpMonitorCh.qmp.ExecQueryMemoryDevices(q.qmpMonitorCh.ctx)
if err != nil {
return 0, fmt.Errorf("failed to query memory devices: %v", err)
@@ -2460,13 +2514,10 @@ func (q *qemu) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
var addMemDevice MemoryDevice
if q.config.VirtioMem && currentMemory != reqMemMB {
q.Logger().WithField("hotplug", "memory").Debugf("resize memory from %dMB to %dMB", currentMemory, reqMemMB)
sizeByte := uint64(reqMemMB - q.config.MemorySize)
sizeByte = sizeByte * 1024 * 1024
err := q.qmpMonitorCh.qmp.ExecQomSet(q.qmpMonitorCh.ctx, "virtiomem0", "requested-size", sizeByte)
if err != nil {
newSizeMB := int(reqMemMB) - int(q.config.MemorySize)
if err := q.resizeVirtioMem(newSizeMB); err != nil {
return 0, MemoryDevice{}, err
}
q.state.HotpluggedMemory = int(sizeByte / 1024 / 1024)
return reqMemMB, MemoryDevice{}, nil
}

View File

@@ -199,10 +199,8 @@ func (q *qemuS390x) appendVhostUserDevice(ctx context.Context, devices []govmmQe
return devices, nil
}
// supportGuestMemoryHotplug return false for s390x architecture. The pc-dimm backend device for s390x
// is not support. PC-DIMM is not listed in the devices supported by qemu-system-s390x -device help
func (q *qemuS390x) supportGuestMemoryHotplug() bool {
return false
return q.protection == noneProtection
}
func (q *qemuS390x) appendNetwork(ctx context.Context, devices []govmmQemu.Device, endpoint Endpoint) ([]govmmQemu.Device, error) {

View File

@@ -8,12 +8,14 @@
package virtcontainers
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
"io"
"net"
"os"
"path"
"path/filepath"
@@ -825,3 +827,376 @@ func TestPrepareInitdataImage(t *testing.T) {
})
}
}
// startTestQMPServer starts a goroutine acting as a minimal QMP server on serverConn.
// It sends the QMP hello banner and responds to commands with the given responses
// (one response string per command, in order). After all responses are sent,
// it keeps the connection open until the client closes it.
func startTestQMPServer(t *testing.T, serverConn net.Conn, responses []string) {
t.Helper()
go func() {
defer serverConn.Close()
hello := `{"QMP":{"version":{"qemu":{"micro":0,"minor":0,"major":5},"package":""},"capabilities":[]}}` + "\n"
if _, err := serverConn.Write([]byte(hello)); err != nil {
return
}
scanner := bufio.NewScanner(serverConn)
for _, resp := range responses {
if !scanner.Scan() {
return
}
if _, err := serverConn.Write([]byte(resp + "\n")); err != nil {
return
}
}
// Keep reading (and ignoring) any additional commands to keep connection alive
for scanner.Scan() {
}
}()
}
// TestHotplugAddMemoryVirtioMem verifies that when VirtioMem is enabled,
// hotplugAddMemory resizes the existing virtio-mem device via qom-set
// instead of adding a new pc-dimm, and updates HotpluggedMemory on success.
func TestHotplugAddMemoryVirtioMem(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
startTestQMPServer(t, serverConn, []string{`{"return":{}}`})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
memDev := &MemoryDevice{SizeMB: 128}
n, err := q.hotplugAddMemory(memDev)
assert.NoError(err)
assert.Equal(128, n)
// HotpluggedMemory should reflect the cumulative total: initial 100 + added 128
assert.Equal(228, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryVirtioMemMultipleOperations verifies that
// multiple virtio-mem resize operations accumulate correctly.
func TestHotplugAddMemoryVirtioMemMultipleOperations(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
// Three successful resize operations
responses := []string{
`{"return":{}}`,
`{"return":{}}`,
`{"return":{}}`,
}
startTestQMPServer(t, serverConn, responses)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
},
state: QemuState{
HotpluggedMemory: 0,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
// First resize: 0 -> 128MB
memDev1 := &MemoryDevice{SizeMB: 128}
n, err := q.hotplugAddMemory(memDev1)
assert.NoError(err)
assert.Equal(128, n)
assert.Equal(128, q.state.HotpluggedMemory)
// Second resize: 128 -> 384MB
memDev2 := &MemoryDevice{SizeMB: 256}
n, err = q.hotplugAddMemory(memDev2)
assert.NoError(err)
assert.Equal(256, n)
assert.Equal(384, q.state.HotpluggedMemory)
// Third resize: 384 -> 896MB
memDev3 := &MemoryDevice{SizeMB: 512}
n, err = q.hotplugAddMemory(memDev3)
assert.NoError(err)
assert.Equal(512, n)
assert.Equal(896, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryVirtioMemZeroSize verifies behavior
// when attempting to add zero memory with virtio-mem.
func TestHotplugAddMemoryVirtioMemZeroSize(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
startTestQMPServer(t, serverConn, []string{`{"return":{}}`})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
memDev := &MemoryDevice{SizeMB: 0}
n, err := q.hotplugAddMemory(memDev)
assert.NoError(err)
assert.Equal(0, n)
// State should remain unchanged
assert.Equal(100, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryVirtioMemError verifies that on a QMP failure
// the error is propagated and HotpluggedMemory is not updated.
func TestHotplugAddMemoryVirtioMemError(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
startTestQMPServer(t, serverConn, []string{`{"error":{"class":"GenericError","desc":"test error"}}`})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
memDev := &MemoryDevice{SizeMB: 128}
n, err := q.hotplugAddMemory(memDev)
assert.Error(err)
assert.Equal(0, n)
// HotpluggedMemory must not be updated when the QMP command fails
assert.Equal(100, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryDIMM verifies the traditional DIMM-based hotplug path
// when VirtioMem is disabled. It should query existing memory devices,
// allocate a new slot, and hotplug the DIMM.
func TestHotplugAddMemoryDIMM(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
// Responses: query-memory-devices, object-add, device_add
responses := []string{
`{"return":[]}`, // query-memory-devices: empty
`{"return":{}}`, // object-add: success
`{"return":{}}`, // device_add: success
}
startTestQMPServer(t, serverConn, responses)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: false, // Traditional DIMM path
},
qemuConfig: govmmQemu.Config{
Knobs: govmmQemu.Knobs{
HugePages: false,
},
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
memDev := &MemoryDevice{SizeMB: 256}
n, err := q.hotplugAddMemory(memDev)
assert.NoError(err)
assert.Equal(256, n)
assert.Equal(0, memDev.Slot) // Should get slot 0 when no devices exist
assert.Equal(356, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryDIMMHotplugError verifies error handling
// when the actual hotplug operation fails.
func TestHotplugAddMemoryDIMMHotplugError(t *testing.T) {
assert := assert.New(t)
serverConn, clientConn := net.Pipe()
// Responses: query-memory-devices succeeds, object-add fails
responses := []string{
`{"return":[]}`, // query-memory-devices: success
`{"error":{"class":"GenericError","desc":"hotplug failed"}}`, // object-add: fails
}
startTestQMPServer(t, serverConn, responses)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disconnectedCh := make(chan struct{})
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
qmp, _, err := govmmQemu.QMPStartWithConn(ctx, clientConn, cfg, disconnectedCh)
assert.NoError(err)
defer func() {
qmp.Shutdown()
<-disconnectedCh
}()
q := &qemu{
config: HypervisorConfig{
VirtioMem: false,
},
qemuConfig: govmmQemu.Config{
Knobs: govmmQemu.Knobs{
HugePages: false,
},
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: qmp,
ctx: ctx,
},
}
memDev := &MemoryDevice{SizeMB: 256}
n, err := q.hotplugAddMemory(memDev)
assert.Error(err)
assert.Equal(0, n)
// HotpluggedMemory should not be updated on error
assert.Equal(100, q.state.HotpluggedMemory)
}
// TestHotplugAddMemoryVirtioMemNegativeSize verifies that
// adding a memory device with negative size is handled gracefully.
func TestHotplugAddMemoryVirtioMemNegativeSize(t *testing.T) {
assert := assert.New(t)
// No need to start a QMP server since the error should be caught
// before any QMP command is issued
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
},
state: QemuState{
HotpluggedMemory: 100,
},
}
memDev := &MemoryDevice{SizeMB: -128}
n, err := q.hotplugAddMemory(memDev)
assert.EqualError(err, "cannot resize virtio-mem device to negative size (-28) memory")
assert.Equal(0, n)
// State should remain unchanged
assert.Equal(100, q.state.HotpluggedMemory)
}
// TestResizeMemoryVirtioMemNegativeSize verifies that
// ResizeMemory with VirtioMem handles negative hotplug size gracefully.
func TestResizeMemoryVirtioMemNegativeSize(t *testing.T) {
assert := assert.New(t)
q := &qemu{
config: HypervisorConfig{
VirtioMem: true,
MemorySize: 2048, // 2GB base memory
},
state: QemuState{
HotpluggedMemory: 100,
},
qmpMonitorCh: qmpChannel{
qmp: &govmmQemu.QMP{},
},
}
// Request size less than base memory would result in negative hotplug size
newMem, memDev, err := q.ResizeMemory(context.Background(), 1024, 128, false)
assert.EqualError(err, "cannot resize virtio-mem device to negative size (-1024) memory")
assert.Equal(uint32(0), newMem)
assert.Equal(MemoryDevice{}, memDev)
// State should remain unchanged
assert.Equal(100, q.state.HotpluggedMemory)
}

100
tests/install_go.sh Executable file
View File

@@ -0,0 +1,100 @@
#!/bin/bash
#
# Copyright (c) 2018-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
[[ -n "${DEBUG:-}" ]] && set -x
tmp_dir=$(mktemp -d -t install-go-tmp.XXXXXXXXXX)
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
force=""
USE_VERSIONS_FILE=""
PROJECT="Kata Containers"
source "${script_dir}/common.bash"
install_dest="/usr/local/"
function finish() {
rm -rf "$tmp_dir"
}
function usage(){
exit_code="$1"
cat <<EOF
Usage:
${script_name} [options] <args>
Args:
<go-version> : Install a specific go version.
Example:
${script_name} 1.10
Options
-d <path> : destination path, path where go will be installed.
-f : Force remove old go version and install the specified one.
-h : Show this help
-p : Install go defined in ${PROJECT} versions file.
EOF
exit "$exit_code"
}
trap finish EXIT
pushd "${tmp_dir}"
while getopts "d:fhp" opt
do
case $opt in
d) install_dest="${OPTARG}" ;;
f) force="true" ;;
h) usage 0 ;;
p) USE_VERSIONS_FILE="true" ;;
esac
done
shift $(( $OPTIND - 1 ))
go_version="${1:-""}"
if [ -z "$go_version" ] && [ "${USE_VERSIONS_FILE}" = "true" ] ;then
go_version=$(get_from_kata_deps ".languages.golang.meta.newest-version")
fi
if [ -z "$go_version" ];then
echo "Missing go version or -p option"
usage 0
fi
if command -v go; then
[[ "$(go version)" == *"go${go_version}"* ]] && \
info "Go ${go_version} already installed" && \
exit
if [ "${force}" = "true" ]; then
info "removing $(go version)"
sudo rm -rf "${install_dest}/go"
else
die "$(go version) is installed, use -f or remove it before install go ${go_version}"
fi
fi
goarch=$(arch_to_golang)
info "Download go version ${go_version}"
kernel_name=$(uname -s | tr '[:upper:]' '[:lower:]')
curl -fsSOL "https://go.dev/dl/go${go_version}.${kernel_name}-${goarch}.tar.gz"
info "Install go"
mkdir -p "${install_dest}"
sudo tar -C "${install_dest}" -xzf "go${go_version}.${kernel_name}-${goarch}.tar.gz"
popd

View File

@@ -45,6 +45,9 @@ function install_dependencies() {
sudo apt-get update
sudo apt-get -y install "${system_deps[@]}"
ensure_yq
"${repo_root_dir}/tests/install_go.sh" -p -f
# Dependency list of projects that we can install them
# directly from their releases on GitHub:
# - containerd

View File

@@ -268,11 +268,15 @@ function TestContainerMemoryUpdate() {
info "TestContainerMemoryUpdate skipped for qemu with runtime-rs"
info "Please check out https://github.com/kata-containers/kata-containers/issues/9375"
return
elif [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" == "ppc64le" ]] || [[ "${ARCH}" == "s390x" ]]; then
elif [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" == "ppc64le" ]]; then
return
fi
for virtio_mem_enabled in 1 0; do
# On s390x, only run the test when virtio_mem is enabled
if [[ "${ARCH}" == "s390x" ]] && [[ $virtio_mem_enabled -eq 0 ]]; then
continue
fi
PrepareContainerMemoryUpdate $virtio_mem_enabled
DoContainerMemoryUpdate $virtio_mem_enabled
done
@@ -282,7 +286,7 @@ function PrepareContainerMemoryUpdate() {
test_virtio_mem=$1
if [ $test_virtio_mem -eq 1 ]; then
if [[ "$ARCH" != "x86_64" ]] && [[ "$ARCH" != "aarch64" ]]; then
if [[ "$ARCH" != "x86_64" ]] && [[ "$ARCH" != "aarch64" ]] && [[ "$ARCH" != "s390x" ]]; then
return
fi
info "Test container memory update with virtio-mem"

View File

@@ -127,8 +127,10 @@ All values can be overridden with --set key=value or a custom `-f myvalues.yaml`
|-----|-------------|---------|
| `imagePullPolicy` | Set the DaemonSet pull policy | `Always` |
| `imagePullSecrets` | Enable pulling from a private registry via pull secret | `""` |
| `image.reference` | Fully qualified image reference | `quay.io/kata-containers/kata-deploy` |
| `image.tag` | Tag of the image reference | `""` |
| `image.reference` | Fully qualified image reference (for digest pinning use the full image e.g. `…@sha256:...`; tag is ignored) | `quay.io/kata-containers/kata-deploy` |
| `image.tag` | Tag of the image reference (defaults to chart `AppVersion` when empty) | `""` |
| `kubectlImage.reference` | Fully qualified `kubectl` image reference (for digest pinning use the full image e.g. `…@sha256:...` and leave `kubectlImage.tag` empty) | `quay.io/kata-containers/kubectl` |
| `kubectlImage.tag` | Tag of the `kubectl` image reference | `latest` |
| `k8sDistribution` | Set the k8s distribution to use: `k8s`, `k0s`, `k3s`, `rke2`, `microk8s` | `k8s` |
| `nodeSelector` | Node labels for pod assignment. Allows restricting deployment to specific nodes | `{}` |
| `runtimeClasses.enabled` | Enable Helm-managed `runtimeClass` creation (recommended) | `true` |

View File

@@ -334,6 +334,36 @@ Builds per-shim semicolon-separated list: "shim1=value1;shim2=value2"
{{- join ";" $proxies -}}
{{- end -}}
{{/*
Main kata-deploy image reference for the DaemonSet.
Supports tag (reference:tag) and digest (reference@sha256:...) formats.
When reference contains "@" (digest), use reference as-is; otherwise use reference:tag (tag defaults to Chart.AppVersion).
*/}}
{{- define "kata-deploy.image" -}}
{{- $ref := .Values.image.reference -}}
{{- $tag := default .Chart.AppVersion .Values.image.tag | toString -}}
{{- if contains "@" $ref -}}
{{- $ref -}}
{{- else -}}
{{- printf "%s:%s" $ref $tag -}}
{{- end -}}
{{- end -}}
{{/*
kubectl image reference for verification and cleanup jobs.
Supports tag (reference:tag) and digest (reference@sha256:...) formats.
When reference already contains "@" (digest) or tag is empty, use reference as-is.
*/}}
{{- define "kata-deploy.kubectlImage" -}}
{{- $ref := .Values.kubectlImage.reference -}}
{{- $tag := .Values.kubectlImage.tag | toString -}}
{{- if or (contains "@" $ref) (eq $tag "") -}}
{{- $ref -}}
{{- else -}}
{{- printf "%s:%s" $ref $tag -}}
{{- end -}}
{{- end -}}
{{/*
Get snapshotter setup list from structured config
*/}}

View File

@@ -33,7 +33,7 @@ spec:
{{- end }}
containers:
- name: rb-cleanup
image: quay.io/kata-containers/kubectl:latest
image: {{ include "kata-deploy.kubectlImage" . }}
command:
- bash
- -c

View File

@@ -133,7 +133,7 @@ spec:
terminationGracePeriodSeconds: 120
containers:
- name: kube-kata
image: {{ .Values.image.reference }}:{{ default .Chart.AppVersion .Values.image.tag }}
image: {{ include "kata-deploy.image" . }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
command: ["/usr/bin/kata-deploy", "install"]
env:

View File

@@ -42,7 +42,7 @@ spec:
serviceAccountName: {{ include "kata-deploy.fullname" . }}-verification
containers:
- name: verify
image: quay.io/kata-containers/kubectl:latest
image: {{ include "kata-deploy.kubectlImage" . }}
command:
- bash
- -c

View File

@@ -6,6 +6,10 @@ image:
reference: quay.io/kata-containers/kata-deploy
tag: ""
kubectlImage:
reference: quay.io/kata-containers/kubectl
tag: latest
k8sDistribution: "k8s" # k8s, k3s, rke2, k0s, microk8s
# Node selector and tolerations to control which nodes the kata-deploy daemonset runs on

View File

@@ -0,0 +1,2 @@
CONFIG_VIRTIO_MEM=y
CONFIG_MEMORY_HOTREMOVE=y

View File

@@ -1 +1 @@
182
183