virtcontainers: remove sandboxConfig.VMConfig
We can just use the hypervisor config to specify the memory size of a guest. There is no need to maintain an extra place just for the memory size.

Fixes: #692

Signed-off-by: Peng Tao <bergwolf@gmail.com>
parent 56ba8adc3a
commit ce288652d5
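As context for the change, here is a minimal sketch (not part of the commit) of how a caller sets guest memory after this change: only HypervisorConfig.DefaultMemSz is used, and there is no separate SandboxConfig.VMConfig / vc.Resources field any more. It mirrors the updated Example_createAndStartSandbox in the diff below; the import path, file paths, and sizes are illustrative assumptions.

package main

import (
    vc "github.com/kata-containers/runtime/virtcontainers" // assumed import path
)

func exampleSandboxConfig() vc.SandboxConfig {
    hypervisorConfig := vc.HypervisorConfig{
        KernelPath:     "/usr/share/kata-containers/vmlinux.container", // illustrative paths
        ImagePath:      "/usr/share/kata-containers/kata-containers.img",
        HypervisorPath: "/usr/bin/qemu-system-x86_64",
        DefaultMemSz:   1024, // guest memory in MiB; formerly duplicated in VMConfig.Memory
    }

    return vc.SandboxConfig{
        HypervisorType:   vc.QemuHypervisor,
        HypervisorConfig: hypervisorConfig,
    }
}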
@@ -403,8 +403,6 @@ func updateRuntimeConfig(configPath string, tomlConf tomlConfig, config *oci.Run
             return fmt.Errorf("%v: %v", configPath, err)
         }
 
-        config.VMConfig.Memory = uint(hConfig.DefaultMemSz)
-
         config.HypervisorConfig = hConfig
     }
 }
@@ -166,10 +166,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
 
         ShimType: defaultShim,
         ShimConfig: shimConfig,
-
-        VMConfig: vc.Resources{
-            Memory: uint(defaultMemSize),
-        },
     }
 
     config = testRuntimeConfig{
@@ -1196,12 +1192,10 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
     assert := assert.New(t)
 
     vcpus := uint(2)
-    mem := uint(2048)
+    mem := uint32(2048)
 
     config := oci.RuntimeConfig{}
-    expectedVMConfig := vc.Resources{
-        Memory: mem,
-    }
+    expectedVMConfig := mem
 
     tomlConf := tomlConfig{
         Hypervisor: map[string]hypervisor{
@@ -1219,7 +1213,7 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
     err := updateRuntimeConfig("", tomlConf, &config)
     assert.NoError(err)
 
-    assert.Equal(expectedVMConfig, config.VMConfig)
+    assert.Equal(expectedVMConfig, config.HypervisorConfig.DefaultMemSz)
 }
 
 func TestUpdateRuntimeConfigurationFactoryConfig(t *testing.T) {
@@ -43,23 +43,17 @@ func Example_createAndStartSandbox() {
         KernelPath: "/usr/share/kata-containers/vmlinux.container",
         ImagePath: "/usr/share/kata-containers/kata-containers.img",
         HypervisorPath: "/usr/bin/qemu-system-x86_64",
+        DefaultMemSz: 1024,
     }
 
     // Use hyperstart default values for the agent.
     agConfig := vc.HyperConfig{}
 
-    // VM resources
-    vmConfig := vc.Resources{
-        Memory: 1024,
-    }
-
     // The sandbox configuration:
     // - One container
     // - Hypervisor is QEMU
     // - Agent is hyperstart
     sandboxConfig := vc.SandboxConfig{
-        VMConfig: vmConfig,
-
         HypervisorType: vc.QemuHypervisor,
         HypervisorConfig: hypervisorConfig,
 
@@ -173,6 +173,7 @@ func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
         KernelPath: kernelPath,
         ImagePath: "/usr/share/clear-containers/clear-containers.img",
         HypervisorMachineType: machineType,
+        DefaultMemSz: uint32(vmMemory),
     }
 
     if err := buildKernelParams(&hypervisorConfig); err != nil {
@@ -195,10 +196,6 @@ func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
 
     shimConfig := getShimConfig(*shimType, shimPath)
 
-    vmConfig := vc.Resources{
-        Memory: vmMemory,
-    }
-
     id := context.String("id")
     if id == "" {
         // auto-generate sandbox name
@@ -206,8 +203,7 @@ func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
     }
 
     sandboxConfig := vc.SandboxConfig{
         ID: id,
-        VMConfig: vmConfig,
 
         HypervisorType: vc.QemuHypervisor,
         HypervisorConfig: hypervisorConfig,
@@ -182,7 +182,6 @@ type HypervisorConfig struct {
     DefaultMaxVCPUs uint32
 
     // DefaultMem specifies default memory size in MiB for the VM.
-    // Sandbox configuration VMConfig.Memory overwrites this.
     DefaultMemSz uint32
 
     // DefaultBridges specifies default number of bridges for the VM.
@@ -546,7 +545,7 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
 // hypervisor is the virtcontainers hypervisor interface.
 // The default hypervisor implementation is Qemu.
 type hypervisor interface {
-    init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error
+    init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error
 
     createSandbox() error
     startSandbox() error
@@ -11,7 +11,7 @@ type mockHypervisor struct {
     vCPUs uint32
 }
 
-func (m *mockHypervisor) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
+func (m *mockHypervisor) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
     err := hypervisorConfig.valid()
     if err != nil {
         return err
@@ -29,7 +29,7 @@ func TestMockHypervisorInit(t *testing.T) {
     ctx := context.Background()
 
     // wrong config
-    if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil {
+    if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err == nil {
         t.Fatal()
     }
 
@@ -40,7 +40,7 @@ func TestMockHypervisorInit(t *testing.T) {
     }
 
     // right config
-    if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
+    if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
         t.Fatal(err)
     }
 }
@@ -100,8 +100,6 @@ type FactoryConfig struct {
 
 // RuntimeConfig aggregates all runtime specific settings
 type RuntimeConfig struct {
-    VMConfig vc.Resources
-
     HypervisorType vc.HypervisorType
     HypervisorConfig vc.HypervisorConfig
 
@@ -406,25 +404,23 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
     return "", fmt.Errorf("Could not find sandbox ID")
 }
 
-func vmConfig(ocispec CompatOCISpec, config RuntimeConfig) (vc.Resources, error) {
-    resources := config.VMConfig
-
+func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
     if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
-        return resources, nil
+        return nil
     }
 
     if ocispec.Linux.Resources.Memory != nil &&
         ocispec.Linux.Resources.Memory.Limit != nil {
         memBytes := *ocispec.Linux.Resources.Memory.Limit
         if memBytes <= 0 {
-            return vc.Resources{}, fmt.Errorf("Invalid OCI memory limit %d", memBytes)
+            return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
         }
         // Use some math magic to round up to the nearest Mb.
         // This has the side effect that we can never have <1Mb assigned.
-        resources.Memory = uint((memBytes + (1024*1024 - 1)) / (1024 * 1024))
+        config.HypervisorConfig.DefaultMemSz = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
     }
 
-    return resources, nil
+    return nil
 }
 
 func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
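The round-up to the nearest MiB used in updateVMConfig above can be checked in isolation. The following is a standalone sketch, not part of the commit, that reuses the same integer arithmetic; the helper name toMiB is hypothetical.

package main

import "fmt"

// toMiB mirrors the expression in updateVMConfig: round a byte count up to
// the next full MiB, so any positive limit maps to at least 1 MiB.
func toMiB(memBytes int64) uint32 {
    return uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
}

func main() {
    fmt.Println(toMiB(128 * 1024 * 1024)) // 128
    fmt.Println(toMiB(20))                // 1, as exercised by TestUpdateVmConfig below
}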
@@ -465,7 +461,7 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
         return vc.SandboxConfig{}, err
     }
 
-    resources, err := vmConfig(ocispec, runtime)
+    err = updateVMConfig(ocispec, &runtime)
     if err != nil {
         return vc.SandboxConfig{}, err
     }
@@ -480,8 +476,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
 
         Hostname: ocispec.Hostname,
 
-        VMConfig: resources,
-
         HypervisorType: runtime.HypervisorType,
         HypervisorConfig: runtime.HypervisorConfig,
 
@@ -252,18 +252,17 @@ func TestMinimalSandboxConfig(t *testing.T) {
     }
 }
 
-func TestVmConfig(t *testing.T) {
+func TestUpdateVmConfig(t *testing.T) {
     var limitBytes int64 = 128 * 1024 * 1024
+    assert := assert.New(t)
 
     config := RuntimeConfig{
-        VMConfig: vc.Resources{
-            Memory: 2048,
+        HypervisorConfig: vc.HypervisorConfig{
+            DefaultMemSz: 2048,
         },
     }
 
-    expectedResources := vc.Resources{
-        Memory: 128,
-    }
+    expectedMem := uint32(128)
 
     ocispec := CompatOCISpec{
         Spec: specs.Spec{
@@ -277,48 +276,28 @@ func TestVmConfig(t *testing.T) {
         },
     }
 
-    resources, err := vmConfig(ocispec, config)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    if reflect.DeepEqual(resources, expectedResources) == false {
-        t.Fatalf("Got %v\n expecting %v", resources, expectedResources)
-    }
+    err := updateVMConfig(ocispec, &config)
+    assert.Nil(err)
+    assert.Equal(config.HypervisorConfig.DefaultMemSz, expectedMem)
 
     limitBytes = -128 * 1024 * 1024
     ocispec.Linux.Resources.Memory.Limit = &limitBytes
 
-    resources, err = vmConfig(ocispec, config)
-    if err == nil {
-        t.Fatalf("Got %v\n expecting error", resources)
-    }
+    err = updateVMConfig(ocispec, &config)
+    assert.NotNil(err)
 
     // Test case when Memory is nil
     ocispec.Spec.Linux.Resources.Memory = nil
-    expectedResources.Memory = config.VMConfig.Memory
-    resources, err = vmConfig(ocispec, config)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    if reflect.DeepEqual(resources, expectedResources) == false {
-        t.Fatalf("Got %v\n expecting %v", resources, expectedResources)
-    }
+    err = updateVMConfig(ocispec, &config)
+    assert.Nil(err)
 
     // Test case when CPU is nil
     ocispec.Spec.Linux.Resources.CPU = nil
     limitBytes = 20
     ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
-    expectedResources.Memory = 1
-    resources, err = vmConfig(ocispec, config)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    if reflect.DeepEqual(resources, expectedResources) == false {
-        t.Fatalf("Got %v\n expecting %v", resources, expectedResources)
-    }
+    err = updateVMConfig(ocispec, &config)
+    assert.Nil(err)
+    assert.NotEqual(config.HypervisorConfig.DefaultMemSz, expectedMem)
 }
 
 func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
@@ -51,8 +51,6 @@ type QemuState struct {
 type qemu struct {
     id string
 
-    vmConfig Resources
-
     storage resourceStorage
 
     config HypervisorConfig
@@ -197,7 +195,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) {
 }
 
 // init intializes the Qemu structure.
-func (q *qemu) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
+func (q *qemu) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
     // save
     q.ctx = ctx
 
@@ -211,7 +209,6 @@ func (q *qemu) init(ctx context.Context, id string, hypervisorConfig *Hypervisor
 
     q.id = id
     q.storage = storage
-    q.vmConfig = vmConfig
     q.config = *hypervisorConfig
     q.arch = newQemuArch(q.config)
 
@@ -273,9 +270,6 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
     }
 
     memMb := uint64(q.config.DefaultMemSz)
-    if q.vmConfig.Memory > 0 {
-        memMb = uint64(q.vmConfig.Memory)
-    }
 
     return q.arch.memoryTopology(memMb, hostMemMb), nil
 }
@@ -1039,11 +1033,7 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
     }
 
     // calculate current memory
-    currentMemory := int(q.config.DefaultMemSz)
-    if q.vmConfig.Memory > 0 {
-        currentMemory = int(q.vmConfig.Memory)
-    }
-    currentMemory += q.state.HotpluggedMemory
+    currentMemory := int(q.config.DefaultMemSz) + q.state.HotpluggedMemory
 
     // Don't exceed the maximum amount of memory
     if currentMemory+memDev.sizeMB > int(maxMem) {
@@ -87,7 +87,7 @@ func TestQemuInit(t *testing.T) {
         t.Fatalf("Could not create parent directory %s: %v", parentDir, err)
     }
 
-    if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
+    if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
         t.Fatal(err)
     }
 
@@ -118,7 +118,7 @@ func TestQemuInitMissingParentDirFail(t *testing.T) {
         t.Fatal(err)
     }
 
-    if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
+    if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
         t.Fatalf("Qemu init() is not expected to fail because of missing parent directory for storage: %v", err)
     }
 }
@@ -168,10 +168,6 @@ func TestQemuMemoryTopology(t *testing.T) {
         MaxMem: memMax,
     }
 
-    q.vmConfig = Resources{
-        Memory: uint(mem),
-    }
-
     memory, err := q.memoryTopology()
     if err != nil {
         t.Fatal(err)
@@ -319,9 +319,6 @@ type SandboxConfig struct {
 
     Hostname string
 
-    // VMConfig is the VM configuration to set for this sandbox.
-    VMConfig Resources
-
     HypervisorType HypervisorType
     HypervisorConfig HypervisorConfig
 
@@ -830,7 +827,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
        }
    }()
 
-    if err = s.hypervisor.init(ctx, s.id, &sandboxConfig.HypervisorConfig, sandboxConfig.VMConfig, s.storage); err != nil {
+    if err = s.hypervisor.init(ctx, s.id, &sandboxConfig.HypervisorConfig, s.storage); err != nil {
         return nil, err
     }
 
@@ -62,7 +62,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
        }
    }()
 
-    if err = hypervisor.init(ctx, id, &config.HypervisorConfig, Resources{}, &filesystem{}); err != nil {
+    if err = hypervisor.init(ctx, id, &config.HypervisorConfig, &filesystem{}); err != nil {
         return nil, err
     }
 