Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 14:54:23 +00:00)

Compare commits (18 commits)
| SHA1 |
|---|
| 4b39dc0a39 |
| 5c69eb5be6 |
| 309756db95 |
| b3d985e29c |
| a818771750 |
| d745287f51 |
| 52993b91b7 |
| 30a8166f4a |
| 7033c97cd2 |
| 4f08cc9bb9 |
| e8ec0c402f |
| 5254670e47 |
| d92ada72de |
| 565fdf8263 |
| f174fac0d6 |
| 928654b5cd |
| 1c0e6b4356 |
| 8f40927df8 |
```diff
@@ -206,7 +206,7 @@ parts:
       # Install raw kernel
       vmlinux_path="vmlinux"
-      [ "${arch}" = "s390x" ] && vmlinux_path="arch/s390/boot/compressed/vmlinux"
+      [ "${arch}" = "s390x" ] && vmlinux_path="arch/s390/boot/vmlinux"
       vmlinux_name="vmlinux-${kernel_suffix}"
       cp "${vmlinux_path}" "${kata_kernel_dir}/${vmlinux_name}"
       ln -sf "${vmlinux_name}" "${kata_kernel_dir}/vmlinux.container"
```
src/agent/Cargo.lock (generated, 17 changed lines):

```diff
@@ -162,13 +162,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

 [[package]]
 name = "cgroups-rs"
-version = "0.2.9"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdae996d9638ba03253ffa1c93345a585974a97abbdeab9176c77922f3efc1e8"
+checksum = "cf5525f2cf84d5113ab26bfb6474180eb63224b4b1e4be31ee87be4098f11399"
 dependencies = [
  "libc",
  "log",
- "nix 0.23.1",
+ "nix 0.24.2",
  "regex",
 ]

@@ -822,6 +822,17 @@ dependencies = [
  "memoffset",
 ]

+[[package]]
+name = "nix"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc"
+dependencies = [
+ "bitflags",
+ "cfg-if 1.0.0",
+ "libc",
+]
+
 [[package]]
 name = "ntapi"
 version = "0.3.7"
```
```diff
@@ -49,7 +49,7 @@ log = "0.4.11"
 prometheus = { version = "0.13.0", features = ["process"] }
 procfs = "0.12.0"
 anyhow = "1.0.32"
-cgroups = { package = "cgroups-rs", version = "0.2.8" }
+cgroups = { package = "cgroups-rs", version = "0.2.10" }

 # Tracing
 tracing = "0.1.26"
```

```diff
@@ -23,7 +23,7 @@ scan_fmt = "0.2.6"
 regex = "1.5.6"
 path-absolutize = "1.2.0"
 anyhow = "1.0.32"
-cgroups = { package = "cgroups-rs", version = "0.2.8" }
+cgroups = { package = "cgroups-rs", version = "0.2.10" }
 rlimit = "0.5.3"
 cfg-if = "0.1.0"
```
```diff
@@ -26,6 +26,7 @@ import (
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"

 	// only register the proto type
 	crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"

@@ -136,7 +137,7 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
 	katautils.HandleFactory(ctx, vci, s.config)
 	rootless.SetRootless(s.config.HypervisorConfig.Rootless)
 	if rootless.IsRootless() {
-		if err := configureNonRootHypervisor(s.config); err != nil {
+		if err := configureNonRootHypervisor(s.config, r.ID); err != nil {
 			return nil, err
 		}
 	}

@@ -303,13 +304,17 @@ func doMount(mounts []*containerd_types.Mount, rootfs string) error {
 	return nil
 }

-func configureNonRootHypervisor(runtimeConfig *oci.RuntimeConfig) error {
+func configureNonRootHypervisor(runtimeConfig *oci.RuntimeConfig, sandboxId string) error {
 	userName, err := utils.CreateVmmUser()
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
+			shimLog.WithFields(logrus.Fields{
+				"user_name":  userName,
+				"sandbox_id": sandboxId,
+			}).WithError(err).Warn("configure non root hypervisor failed, delete the user")
 			if err2 := utils.RemoveVmmUser(userName); err2 != nil {
 				shimLog.WithField("userName", userName).WithError(err).Warn("failed to remove user")
 			}

@@ -330,7 +335,14 @@ func configureNonRootHypervisor(runtimeConfig *oci.RuntimeConfig) error {
 		return err
 	}
 	runtimeConfig.HypervisorConfig.Uid = uint32(uid)
+	runtimeConfig.HypervisorConfig.User = userName
 	runtimeConfig.HypervisorConfig.Gid = uint32(gid)
+	shimLog.WithFields(logrus.Fields{
+		"user_name":  userName,
+		"uid":        uid,
+		"gid":        gid,
+		"sandbox_id": sandboxId,
+	}).Debug("successfully created a non root user for the hypervisor")

 	userTmpDir := path.Join("/run/user/", fmt.Sprint(uid))
 	_, err = os.Stat(userTmpDir)
```
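The deferred cleanup in `configureNonRootHypervisor` works because the closure reads the surrounding `err` variable: any later failure assigns to it before returning, so the user removal fires only on the error path. A minimal sketch of the pattern (names are illustrative, not from the repo; the repo captures a local `err`, while the named return shown here behaves the same way):

```go
package main

import (
	"errors"
	"fmt"
)

// setupUser sketches the create-then-undo-on-failure pattern: the deferred
// closure observes the final value of the named return err, so the cleanup
// only runs when the function ultimately fails.
func setupUser(fail bool) (err error) {
	userName := "vmm-user-1234" // illustrative placeholder
	fmt.Println("created user:", userName)

	defer func() {
		if err != nil {
			fmt.Println("cleanup: removing user:", userName)
		}
	}()

	if fail {
		return errors.New("a later configuration step failed")
	}
	return nil
}

func main() {
	_ = setupUser(true)  // prints the cleanup line
	_ = setupUser(false) // keeps the user
}
```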
```diff
@@ -667,6 +667,10 @@ func (a *Acrn) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
 	return VcpuThreadIDs{}, nil
 }

+func (a *Acrn) GetTotalMemoryMB(ctx context.Context) uint32 {
+	return a.config.MemorySize
+}
+
 func (a *Acrn) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
 	return 0, MemoryDevice{}, nil
 }
```
```diff
@@ -1576,6 +1576,16 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 	return nil
 }

+func (clh *cloudHypervisor) GetTotalMemoryMB(ctx context.Context) uint32 {
+	vminfo, err := clh.vmInfo()
+	if err != nil {
+		clh.Logger().WithError(err).Error("failed to get vminfo")
+		return 0
+	}
+
+	return uint32(vminfo.GetMemoryActualSize() >> utils.MibToBytesShift)
+}
+
 // vmInfo ask to hypervisor for current VM status
 func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
 	cl := clh.client()
```
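`GetMemoryActualSize()` reports bytes, and the right shift converts bytes to MiB. Assuming `utils.MibToBytesShift` is 20 (1 MiB = 2^20 bytes), the conversion works like this sketch:

```go
package main

import "fmt"

// Assumption: utils.MibToBytesShift is 20, since 1 MiB = 1 << 20 bytes.
const mibToBytesShift = 20

func main() {
	// The hypervisor reports the actual memory size in bytes.
	actualSizeBytes := uint64(2048) << mibToBytesShift

	// Shifting right by 20 converts bytes back to MiB.
	fmt.Println(uint32(actualSizeBytes>>mibToBytesShift), "MiB") // 2048 MiB
}
```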
```diff
@@ -1165,6 +1165,10 @@ func (fc *firecracker) HypervisorConfig() HypervisorConfig {
 	return fc.config
 }

+func (fc *firecracker) GetTotalMemoryMB(ctx context.Context) uint32 {
+	return fc.config.MemorySize
+}
+
 func (fc *firecracker) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
 	return 0, MemoryDevice{}, nil
 }
```
```diff
@@ -371,6 +371,9 @@ type HypervisorConfig struct {
 	// SeccompSandbox is the qemu function which enables the seccomp feature
 	SeccompSandbox string

+	// The user maps to the uid.
+	User string
+
 	// KernelParams are additional guest kernel parameters.
 	KernelParams []Param

@@ -913,6 +916,7 @@ type Hypervisor interface {
 	HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
 	ResizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error)
 	ResizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
+	GetTotalMemoryMB(ctx context.Context) uint32
 	GetVMConsole(ctx context.Context, sandboxID string) (string, string, error)
 	Disconnect(ctx context.Context)
 	Capabilities(ctx context.Context) types.Capabilities
```
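Adding `GetTotalMemoryMB` to the `Hypervisor` interface is what forces the matching additions in the acrn, cloud-hypervisor, firecracker, qemu, and mock implementations seen in the other hunks. A compile-time assertion is a common Go idiom for catching a missing method early; a sketch of the idea (the assertion below is illustrative, not necessarily present in this repo):

```go
package main

import "context"

// Reduced stand-in for the virtcontainers Hypervisor interface.
type Hypervisor interface {
	GetTotalMemoryMB(ctx context.Context) uint32
}

type mockHypervisor struct {
	memorySizeMB uint32
}

func (m *mockHypervisor) GetTotalMemoryMB(ctx context.Context) uint32 {
	return m.memorySizeMB
}

// Compile-time check: if mockHypervisor ever misses a Hypervisor method,
// the build fails here instead of at a runtime type assertion.
var _ Hypervisor = (*mockHypervisor)(nil)

func main() {}
```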
```diff
@@ -17,6 +17,7 @@ import (
 var MockHybridVSockPath = "/tmp/kata-mock-hybrid-vsock.socket"

 type mockHypervisor struct {
+	config  HypervisorConfig
 	mockPid int
 }

@@ -27,10 +28,11 @@ func (m *mockHypervisor) Capabilities(ctx context.Context) types.Capabilities {
 }

 func (m *mockHypervisor) HypervisorConfig() HypervisorConfig {
-	return HypervisorConfig{}
+	return m.config
 }

 func (m *mockHypervisor) setConfig(config *HypervisorConfig) error {
+	m.config = *config
 	return nil
 }

@@ -38,7 +40,7 @@ func (m *mockHypervisor) CreateVM(ctx context.Context, id string, network Networ
 	if err := m.setConfig(hypervisorConfig); err != nil {
 		return err
 	}
-
+	m.config.MemSlots = 0
 	return nil
 }

@@ -92,12 +94,20 @@ func (m *mockHypervisor) GetVMConsole(ctx context.Context, sandboxID string) (st
 }

 func (m *mockHypervisor) ResizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
+	if m.config.MemorySize != memMB {
+		// For testing, we'll use MemSlots to track how many times we resized memory
+		m.config.MemSlots += 1
+		m.config.MemorySize = memMB
+	}
 	return 0, MemoryDevice{}, nil
 }
 func (m *mockHypervisor) ResizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
 	return 0, 0, nil
 }

+func (m *mockHypervisor) GetTotalMemoryMB(ctx context.Context) uint32 {
+	return m.config.MemorySize
+}
 func (m *mockHypervisor) Disconnect(ctx context.Context) {
 }
```
```diff
@@ -14,7 +14,7 @@ import (
 )

 func TestMockHypervisorCreateVM(t *testing.T) {
-	var m *mockHypervisor
+	m := &mockHypervisor{}
 	assert := assert.New(t)

 	sandbox := &Sandbox{
```
```diff
@@ -110,6 +110,8 @@ type qemu struct {
 	nvdimmCount int

 	stopped bool
+
+	mu sync.Mutex
 }

 const (
```
```diff
@@ -678,7 +680,7 @@ func (q *qemu) checkBpfEnabled() {
 		q.Logger().WithError(err).Warningf("failed to get bpf_jit_enable status")
 		return
 	}
-	enabled, err := strconv.Atoi(string(out))
+	enabled, err := strconv.Atoi(strings.TrimSpace(string(out)))
 	if err != nil {
 		q.Logger().WithError(err).Warningf("failed to convert bpf_jit_enable status to integer")
 		return
```
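The `strings.TrimSpace` fix matters because values read from `/proc/sys` carry a trailing newline, and `strconv.Atoi` rejects it. A minimal reproduction:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	out := []byte("1\n") // what reading /proc/sys/net/core/bpf_jit_enable yields

	_, err := strconv.Atoi(string(out))
	fmt.Println("without TrimSpace:", err) // strconv.Atoi: parsing "1\n": invalid syntax

	v, err := strconv.Atoi(strings.TrimSpace(string(out)))
	fmt.Println("with TrimSpace:", v, err) // 1 <nil>
}
```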
```diff
@@ -968,6 +970,8 @@ func (q *qemu) waitVM(ctx context.Context, timeout int) error {

 // StopVM will stop the Sandbox's VM.
 func (q *qemu) StopVM(ctx context.Context, waitOnly bool) error {
+	q.mu.Lock()
+	defer q.mu.Unlock()
 	span, _ := katatrace.Trace(ctx, q.Logger(), "StopVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
 	defer span.End()
```
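The new `q.mu` serializes `StopVM` against concurrent callers so teardown cannot race; the struct already tracks a `stopped` flag, and a mutex plus such a flag is the usual shape of an idempotent stop. The full `StopVM` body is not shown in this diff, so the sketch below only illustrates the general pattern, not the repo's exact logic:

```go
package main

import (
	"fmt"
	"sync"
)

type vm struct {
	mu      sync.Mutex
	stopped bool
}

// Stop is idempotent: the mutex serializes callers and the flag
// turns every call after the first into a no-op.
func (v *vm) Stop() {
	v.mu.Lock()
	defer v.mu.Unlock()
	if v.stopped {
		return
	}
	fmt.Println("stopping VM")
	v.stopped = true
}

func main() {
	v := &vm{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); v.Stop() }()
	}
	wg.Wait() // "stopping VM" is printed exactly once
}
```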
```diff
@@ -1059,15 +1063,27 @@ func (q *qemu) cleanupVM() error {
 	}

 	if rootless.IsRootless() {
-		u, err := user.LookupId(strconv.Itoa(int(q.config.Uid)))
-		if err != nil {
-			q.Logger().WithError(err).WithField("uid", q.config.Uid).Warn("failed to find the user")
+		if _, err := user.Lookup(q.config.User); err != nil {
+			q.Logger().WithError(err).WithFields(
+				logrus.Fields{
+					"user": q.config.User,
+					"uid":  q.config.Uid,
+				}).Warn("failed to find the user, it might have been removed")
 			return nil
 		}

-		if err := pkgUtils.RemoveVmmUser(u.Username); err != nil {
-			q.Logger().WithError(err).WithField("user", u.Username).Warn("failed to delete the user")
+		if err := pkgUtils.RemoveVmmUser(q.config.User); err != nil {
+			q.Logger().WithError(err).WithFields(
+				logrus.Fields{
+					"user": q.config.User,
+					"uid":  q.config.Uid,
+				}).Warn("failed to delete the user")
 		}
+		q.Logger().WithFields(
+			logrus.Fields{
+				"user": q.config.User,
+				"uid":  q.config.Uid,
+			}).Debug("successfully removed the non root user")
 	}

 	return nil
```
```diff
@@ -2159,6 +2175,10 @@ func (q *qemu) Disconnect(ctx context.Context) {
 	q.qmpShutdown()
 }

+func (q *qemu) GetTotalMemoryMB(ctx context.Context) uint32 {
+	return q.config.MemorySize + uint32(q.state.HotpluggedMemory)
+}
+
 // ResizeMemory gets a request to update the VM memory to reqMemMB
 // Memory update is managed with two approaches
 // Add memory to VM:

@@ -2172,7 +2192,7 @@ func (q *qemu) Disconnect(ctx context.Context) {
 // A longer term solution is evaluate solutions like virtio-mem
 func (q *qemu) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {

-	currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory)
+	currentMemory := q.GetTotalMemoryMB(ctx)
 	if err := q.qmpSetup(); err != nil {
 		return 0, MemoryDevice{}, err
 	}
```
```diff
@@ -75,6 +75,14 @@ const (

 	// Restricted permission for shared directory managed by virtiofs
 	sharedDirMode = os.FileMode(0700) | os.ModeDir

+	// hotplug factor indicates how much memory can be hotplugged relative to the amount of
+	// RAM provided to the guest. This is a conservative heuristic based on needing 64 bytes per
+	// 4KiB page of hotplugged memory.
+	//
+	// As an example: 12 GiB hotplugged -> 3 Mi pages -> 192 MiBytes overhead (3Mi x 64B).
+	// This is approximately what should be free in a relatively unloaded 256 MiB guest (75% of available memory). So, 256 Mi x 48 => 12 Gi
+	acpiMemoryHotplugFactor = 48
 )

 var (
```
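The numbers in that comment can be checked directly: hotplugged memory costs 64 bytes of `mem_map` per 4 KiB page, i.e. 1/64 of the hotplugged size, and if roughly 75% of the guest's current RAM is free to absorb that overhead, the guest can take about 0.75 × 64 = 48 times its current size. A quick verification of the 12 GiB example:

```go
package main

import "fmt"

func main() {
	const pageKiB = 4             // hotplugged memory is mapped in 4 KiB pages
	const memmapBytesPerPage = 64 // struct-page bookkeeping per page

	hotplugMiB := 12 * 1024 // 12 GiB of hotplugged memory
	pages := hotplugMiB * 1024 / pageKiB
	overheadMiB := pages * memmapBytesPerPage / (1024 * 1024)

	fmt.Println(pages, "pages")     // 3145728 pages (3 Mi)
	fmt.Println(overheadMiB, "MiB") // 192 MiB of mem_map overhead
	fmt.Println(256*48/1024, "GiB") // a 256 MiB guest x factor 48 = 12 GiB
}
```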
```diff
@@ -2003,9 +2011,60 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
 	}
 	s.Logger().Debugf("Sandbox CPUs: %d", newCPUs)

-	// Update Memory
-	s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory")
+	// Update Memory --
+	// If we're using ACPI hotplug for memory, there's a limitation on the amount of memory which can be hotplugged at a single time.
+	// We must have enough free memory in the guest kernel to cover 64bytes per (4KiB) page of memory added for mem_map.
+	// See https://github.com/kata-containers/kata-containers/issues/4847 for more details.
+	// For a typical pod lifecycle, we expect that each container is added when we start the workloads. Based on this, we'll "assume" that majority
+	// of the guest memory is readily available. From experimentation, we see that we can add approximately 48 times what is already provided to
+	// the guest workload. For example, a 256 MiB guest should be able to accommodate hotplugging 12 GiB of memory.
+	//
+	// If virtio-mem is being used, there isn't such a limitation - we can hotplug the maximum allowed memory at a single time.
+	//
 	newMemoryMB := uint32(sandboxMemoryByte >> utils.MibToBytesShift)
+	finalMemoryMB := newMemoryMB
+
+	hconfig := s.hypervisor.HypervisorConfig()
+
+	for {
+		currentMemoryMB := s.hypervisor.GetTotalMemoryMB(ctx)
+
+		maxhotPluggableMemoryMB := currentMemoryMB * acpiMemoryHotplugFactor
+
+		// In the case of virtio-mem, we don't have a restriction on how much can be hotplugged at
+		// a single time. As a result, the max hotpluggable is only limited by the maximum memory size
+		// of the guest.
+		if hconfig.VirtioMem {
+			maxhotPluggableMemoryMB = uint32(hconfig.DefaultMaxMemorySize) - currentMemoryMB
+		}
+
+		deltaMB := int32(finalMemoryMB - currentMemoryMB)
+
+		if deltaMB > int32(maxhotPluggableMemoryMB) {
+			s.Logger().Warnf("Large hotplug. Adding %d MB of %d total memory", maxhotPluggableMemoryMB, deltaMB)
+			newMemoryMB = currentMemoryMB + maxhotPluggableMemoryMB
+		} else {
+			newMemoryMB = finalMemoryMB
+		}
+
+		// Add the memory to the guest and online the memory:
+		if err := s.updateMemory(ctx, newMemoryMB); err != nil {
+			return err
+		}
+
+		if newMemoryMB == finalMemoryMB {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (s *Sandbox) updateMemory(ctx context.Context, newMemoryMB uint32) error {
+	// online the memory:
+	s.Logger().WithField("memory-sandbox-size-mb", newMemoryMB).Debugf("Request to hypervisor to update memory")
 	newMemory, updatedMemoryDevice, err := s.hypervisor.ResizeMemory(ctx, newMemoryMB, s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
 	if err != nil {
 		if err == noGuestMemHotplugErr {
```
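Tracing the loop with the numbers used by `TestSandboxUpdateResources` below (a 1 MiB mock guest growing to 4 GiB; the sandbox's base memory is ignored here for simplicity) shows why exactly three resizes are expected: +48 MiB, +2352 MiB, then the remainder:

```go
package main

import "fmt"

// Standalone trace of the chunked ACPI hotplug loop in updateResources,
// using the figures from TestSandboxUpdateResources.
func main() {
	const acpiMemoryHotplugFactor = 48

	currentMB := uint32(1)    // mock guest starts with 1 MiB
	finalMB := uint32(4096)   // target after the 4 GiB container limit
	resizes := 0

	for currentMB < finalMB {
		maxHotplugMB := currentMB * acpiMemoryHotplugFactor
		newMB := finalMB
		if finalMB-currentMB > maxHotplugMB {
			newMB = currentMB + maxHotplugMB
		}
		resizes++
		fmt.Printf("resize %d: +%d MiB -> %d MiB\n", resizes, newMB-currentMB, newMB)
		currentMB = newMB
	}
	// resize 1: +48 MiB -> 49 MiB
	// resize 2: +2352 MiB -> 2401 MiB
	// resize 3: +1695 MiB -> 4096 MiB
}
```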
```diff
@@ -2025,7 +2084,6 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
 	if err := s.agent.onlineCPUMem(ctx, 0, false); err != nil {
 		return err
 	}
-
 	return nil
 }
```
```diff
@@ -41,6 +41,7 @@ func newHypervisorConfig(kernelParams []Param, hParams []Param) HypervisorConfig
 		HypervisorPath:   filepath.Join(testDir, testHypervisor),
 		KernelParams:     kernelParams,
 		HypervisorParams: hParams,
+		MemorySize:       1,
 	}
 }
```
```diff
@@ -1360,7 +1361,6 @@ func TestSandboxUpdateResources(t *testing.T) {
 	contConfig1 := newTestContainerConfigNoop("cont-00001")
 	contConfig2 := newTestContainerConfigNoop("cont-00002")
 	hConfig := newHypervisorConfig(nil, nil)
-
 	defer cleanUp()
 	// create a sandbox
 	s, err := testCreateSandbox(t,

@@ -1370,28 +1370,37 @@ func TestSandboxUpdateResources(t *testing.T) {
 		NetworkConfig{},
 		[]ContainerConfig{contConfig1, contConfig2},
 		nil)
 	assert.NoError(t, err)

 	err = s.updateResources(context.Background())
 	assert.NoError(t, err)

-	containerMemLimit := int64(1000)
+	// For the mock hypervisor, we expect MemSlots to be 0 since the memory wasn't changed.
+	assert.Equal(t, s.hypervisor.HypervisorConfig().MemSlots, uint32(0))
+
+	containerMemLimit := int64(4 * 1024 * 1024 * 1024)
 	containerCPUPeriod := uint64(1000)
 	containerCPUQouta := int64(5)
-	for _, c := range s.config.Containers {
-		c.Resources.Memory = &specs.LinuxMemory{
+	for idx := range s.config.Containers {
+		s.config.Containers[idx].Resources.Memory = &specs.LinuxMemory{
 			Limit: new(int64),
 		}
-		c.Resources.CPU = &specs.LinuxCPU{
+		s.config.Containers[idx].Resources.CPU = &specs.LinuxCPU{
 			Period: new(uint64),
 			Quota:  new(int64),
 		}
-		c.Resources.Memory.Limit = &containerMemLimit
-		c.Resources.CPU.Period = &containerCPUPeriod
-		c.Resources.CPU.Quota = &containerCPUQouta
+		s.config.Containers[idx].Resources.Memory.Limit = &containerMemLimit
+		s.config.Containers[idx].Resources.CPU.Period = &containerCPUPeriod
+		s.config.Containers[idx].Resources.CPU.Quota = &containerCPUQouta
 	}
 	err = s.updateResources(context.Background())
 	assert.NoError(t, err)

+	// Since we're starting with a memory of 1 MB, we expect it to take 3 hotplugs to add 4GiB of memory when using ACPI hotplug:
+	// +48MB
+	// +2352MB
+	// +the remaining
+	assert.Equal(t, s.hypervisor.HypervisorConfig().MemSlots, uint32(3))
 }

 func TestSandboxExperimentalFeature(t *testing.T) {
```
```diff
@@ -18,7 +18,7 @@ spec:
         katacontainers.io/kata-runtime: cleanup
     containers:
       - name: kube-kata-cleanup
-        image: quay.io/kata-containers/kata-deploy:2.5.1
+        image: quay.io/kata-containers/kata-deploy:2.5.2
         imagePullPolicy: Always
         command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset" ]
         env:
```
```diff
@@ -16,7 +16,7 @@ spec:
       serviceAccountName: kata-label-node
       containers:
         - name: kube-kata
-          image: quay.io/kata-containers/kata-deploy:2.5.1
+          image: quay.io/kata-containers/kata-deploy:2.5.2
          imagePullPolicy: Always
           lifecycle:
             preStop:
```
```diff
@@ -471,7 +471,7 @@ install_kata() {
 	if [ "${arch_target}" = "arm64" ]; then
 		install --mode 0644 -D "arch/${arch_target}/boot/Image" "${install_path}/${vmlinux}"
 	elif [ "${arch_target}" = "s390" ]; then
-		install --mode 0644 -D "arch/${arch_target}/boot/compressed/vmlinux" "${install_path}/${vmlinux}"
+		install --mode 0644 -D "arch/${arch_target}/boot/vmlinux" "${install_path}/${vmlinux}"
 	else
 		install --mode 0644 -D "vmlinux" "${install_path}/${vmlinux}"
 	fi
```
```diff
@@ -12,7 +12,6 @@ CONFIG_SPARSEMEM_VMEMMAP=y
 # Without these the pmem_should_map_pages() call in the kernel fails with new
 # Related to the ARCH_HAS_HMM set in the arch files.
 CONFIG_ZONE_DEVICE=y
-CONFIG_DEV_PAGEMAP_OPS=y

 CONFIG_ND_PFN=y
 CONFIG_NVDIMM_PFN=y

@@ -23,7 +22,6 @@ CONFIG_BLK_DEV=y
 CONFIG_BLK_DEV_PMEM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_LIBNVDIMM=y
-CONFIG_ND_BLK=y
 CONFIG_BTT=y
 # FIXME: Should check if this is really needed
 # https://github.com/kata-containers/packaging/issues/483
```

```diff
@@ -1,2 +0,0 @@
-# Options needed by HAVE_EBPF_JIT
-CONFIG_PACK_STACK=y
```

```diff
@@ -1 +1 @@
-95
+96
```
In `bump_repo()`, plain `[ ... ]` does not pattern-match with `!=`, so the `*"rc"*` test never worked as intended; the fix switches to the `[[ ... =~ ]]` regex test already used for `version_to_replace`:

```diff
@@ -191,7 +191,7 @@ bump_repo() {
 			need_commit=true
 		fi
-	elif [ "${new_version}" != *"rc"* ]; then
+	elif [[ ! "${new_version}" =~ "rc" ]]; then
 		## We are on a stable branch and creating new stable releases.
 		## Need to change kata-deploy / kata-cleanup to use the stable tags.
 		if [[ "${version_to_replace}" =~ "rc" ]]; then
```
```diff
@@ -153,7 +153,7 @@ assets:
   kernel:
     description: "Linux kernel optimised for virtual machines"
     url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/"
-    version: "v5.15.63"
+    version: "v5.19.2"
   tdx:
     description: "Linux kernel that supports TDX"
     url: "https://github.com/intel/tdx/archive/refs/tags"
```