runtime: delete sandboxlist.go and sandboxlist_test.go

Delete sandboxlist.go and sandboxlist_test.go under the virtcontainers package; the global sandbox list they provided is replaced by a single package-level globalSandbox pointer.

Fixes: #1078

Signed-off-by: bin liu <bin@hyper.sh>
bin liu 2020-11-04 14:29:24 +08:00
parent 61fccef643
commit 290203943c
10 changed files with 15 additions and 295 deletions
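The substance of the change, condensed from the hunks below: the per-ID sandbox registry is removed and replaced by a single package-level pointer, presumably because each runtime process only ever manages one sandbox. A minimal before/after sketch copied from this diff (the Sandbox type lives elsewhere in the virtcontainers package):

package virtcontainers

import "sync"

// Removed: a registry of sandboxes keyed by ID, guarded by an RWMutex
// (sandboxlist.go, deleted by the last two hunks of this diff).
type sandboxList struct {
    lock      sync.RWMutex
    sandboxes map[string]*Sandbox
}

var globalSandboxList = &sandboxList{sandboxes: make(map[string]*Sandbox)}

// Added: a single pointer is enough when only one sandbox exists per process.
var globalSandbox *Sandbox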


@@ -231,10 +231,9 @@ func (a *Acrn) appendImage(devices []Device, imagePath string) ([]Device, error)
// Get sandbox and increment the globalIndex.
// This is to make sure the VM rootfs occupies
// the first Index which is /dev/vda.
sandbox, err := globalSandboxList.lookupSandbox(a.id)
if sandbox == nil && err != nil {
return nil, err
}
sandbox := globalSandbox
var err error
if _, err = sandbox.GetAndSetSandboxBlockIndex(); err != nil {
return nil, err
}
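For context, the comment in this hunk is why appendImage touches the sandbox at all: claiming a block index here ensures the VM rootfs image occupies the first index, /dev/vda. GetAndSetSandboxBlockIndex itself is not part of this diff; assuming it allocates out of the BlockIndexMap that appears in the test hunk below, an allocator of roughly this shape would give that behaviour:

// Illustrative sketch only, not the implementation behind
// GetAndSetSandboxBlockIndex. used records the indices already handed out
// (cf. BlockIndexMap in the test hunk below); returning the lowest free
// index means the first caller, the rootfs, gets index 0, i.e. /dev/vda.
func nextBlockIndex(used map[int]struct{}) int {
    for i := 0; ; i++ {
        if _, taken := used[i]; !taken {
            used[i] = struct{}{}
            return i
        }
    }
}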


@@ -230,10 +230,10 @@ func TestAcrnCreateSandbox(t *testing.T) {
state: types.SandboxState{BlockIndexMap: make(map[int]struct{})},
}
err = globalSandboxList.addSandbox(sandbox)
assert.NoError(err)
defer globalSandboxList.removeSandbox(sandbox.id)
globalSandbox = sandbox
defer func() {
globalSandbox = nil
}()
// Set PID to 1 to ignore the hypercall to get the UUID and set a random UUID
a.state.PID = 1


@@ -153,10 +153,7 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
}
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
return err
}
s := globalSandbox
defer s.Release()


@@ -316,6 +316,6 @@ func TestCleanupContainer(t *testing.T) {
_, err = os.Stat(sandboxDir)
if err == nil {
t.Fatal(err)
t.Fatal("sandbox dir should be deleted")
}
}


@@ -9,8 +9,6 @@ import (
"errors"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
exp "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/experimental"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
)
@@ -428,117 +426,3 @@ func (c *Container) Restore() error {
c.loadContMounts(cs)
return nil
}
func loadSandboxConfig(id string) (*SandboxConfig, error) {
store, err := persist.GetDriver()
if err != nil || store == nil {
return nil, errors.New("failed to get fs persist driver")
}
ss, _, err := store.FromDisk(id)
if err != nil {
return nil, err
}
savedConf := ss.Config
sconfig := &SandboxConfig{
ID: id,
HypervisorType: HypervisorType(savedConf.HypervisorType),
NetworkConfig: NetworkConfig{
NetNSPath: savedConf.NetworkConfig.NetNSPath,
NetNsCreated: savedConf.NetworkConfig.NetNsCreated,
DisableNewNetNs: savedConf.NetworkConfig.DisableNewNetNs,
InterworkingModel: NetInterworkingModel(savedConf.NetworkConfig.InterworkingModel),
},
ShmSize: savedConf.ShmSize,
SharePidNs: savedConf.SharePidNs,
SystemdCgroup: savedConf.SystemdCgroup,
SandboxCgroupOnly: savedConf.SandboxCgroupOnly,
DisableGuestSeccomp: savedConf.DisableGuestSeccomp,
Cgroups: savedConf.Cgroups,
}
for _, name := range savedConf.Experimental {
sconfig.Experimental = append(sconfig.Experimental, *exp.Get(name))
}
hconf := savedConf.HypervisorConfig
sconfig.HypervisorConfig = HypervisorConfig{
NumVCPUs: hconf.NumVCPUs,
DefaultMaxVCPUs: hconf.DefaultMaxVCPUs,
MemorySize: hconf.MemorySize,
DefaultBridges: hconf.DefaultBridges,
Msize9p: hconf.Msize9p,
MemSlots: hconf.MemSlots,
MemOffset: hconf.MemOffset,
VirtioMem: hconf.VirtioMem,
VirtioFSCacheSize: hconf.VirtioFSCacheSize,
KernelPath: hconf.KernelPath,
ImagePath: hconf.ImagePath,
InitrdPath: hconf.InitrdPath,
FirmwarePath: hconf.FirmwarePath,
MachineAccelerators: hconf.MachineAccelerators,
CPUFeatures: hconf.CPUFeatures,
HypervisorPath: hconf.HypervisorPath,
HypervisorPathList: hconf.HypervisorPathList,
HypervisorCtlPath: hconf.HypervisorCtlPath,
HypervisorCtlPathList: hconf.HypervisorCtlPathList,
JailerPath: hconf.JailerPath,
JailerPathList: hconf.JailerPathList,
BlockDeviceDriver: hconf.BlockDeviceDriver,
HypervisorMachineType: hconf.HypervisorMachineType,
MemoryPath: hconf.MemoryPath,
DevicesStatePath: hconf.DevicesStatePath,
EntropySource: hconf.EntropySource,
SharedFS: hconf.SharedFS,
VirtioFSDaemon: hconf.VirtioFSDaemon,
VirtioFSDaemonList: hconf.VirtioFSDaemonList,
VirtioFSCache: hconf.VirtioFSCache,
VirtioFSExtraArgs: hconf.VirtioFSExtraArgs[:],
BlockDeviceCacheSet: hconf.BlockDeviceCacheSet,
BlockDeviceCacheDirect: hconf.BlockDeviceCacheDirect,
BlockDeviceCacheNoflush: hconf.BlockDeviceCacheNoflush,
DisableBlockDeviceUse: hconf.DisableBlockDeviceUse,
EnableIOThreads: hconf.EnableIOThreads,
Debug: hconf.Debug,
MemPrealloc: hconf.MemPrealloc,
HugePages: hconf.HugePages,
FileBackedMemRootDir: hconf.FileBackedMemRootDir,
FileBackedMemRootList: hconf.FileBackedMemRootList,
Realtime: hconf.Realtime,
Mlock: hconf.Mlock,
DisableNestingChecks: hconf.DisableNestingChecks,
DisableImageNvdimm: hconf.DisableImageNvdimm,
HotplugVFIOOnRootBus: hconf.HotplugVFIOOnRootBus,
PCIeRootPort: hconf.PCIeRootPort,
BootToBeTemplate: hconf.BootToBeTemplate,
BootFromTemplate: hconf.BootFromTemplate,
DisableVhostNet: hconf.DisableVhostNet,
EnableVhostUserStore: hconf.EnableVhostUserStore,
VhostUserStorePath: hconf.VhostUserStorePath,
VhostUserStorePathList: hconf.VhostUserStorePathList,
GuestHookPath: hconf.GuestHookPath,
VMid: hconf.VMid,
RxRateLimiterMaxRate: hconf.RxRateLimiterMaxRate,
TxRateLimiterMaxRate: hconf.TxRateLimiterMaxRate,
SGXEPCSize: hconf.SGXEPCSize,
EnableAnnotations: hconf.EnableAnnotations,
}
sconfig.AgentConfig = KataAgentConfig{
LongLiveConn: savedConf.KataAgentConfig.LongLiveConn,
}
for _, contConf := range savedConf.ContainerConfigs {
sconfig.Containers = append(sconfig.Containers, ContainerConfig{
ID: contConf.ID,
Annotations: contConf.Annotations,
Resources: contConf.Resources,
RootFs: RootFs{
Target: contConf.RootFs,
},
})
}
return sconfig, nil
}
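The removed loadSandboxConfig above was the bridge from the persist store back to a live SandboxConfig. For readers unfamiliar with that store, a hedged sketch of the calls it relied on; GetDriver, FromDisk and the Config fields are taken from the removed code, while dumpSavedSandbox itself is a hypothetical helper, not part of the tree, and assumes the errors and persist imports shown in the hunk above plus fmt:

// dumpSavedSandbox is a hypothetical helper, shown only to illustrate the
// persist API the removed loadSandboxConfig was built on.
func dumpSavedSandbox(id string) error {
    store, err := persist.GetDriver()
    if err != nil || store == nil {
        return errors.New("failed to get fs persist driver")
    }
    // FromDisk returns the saved sandbox state for the given ID
    // (the per-container state it also returns is ignored here).
    ss, _, err := store.FromDisk(id)
    if err != nil {
        return err
    }
    fmt.Printf("hypervisor=%s vcpus=%d containers=%d\n",
        ss.Config.HypervisorType,
        ss.Config.HypervisorConfig.NumVCPUs,
        len(ss.Config.ContainerConfigs))
    return nil
}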


@@ -38,7 +38,6 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cpuset"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
@@ -55,6 +54,9 @@ const (
DirMode = os.FileMode(0750) | os.ModeDir
)
// globalSandbox tracks the sandbox globally
var globalSandbox *Sandbox
// SandboxStatus describes a sandbox status.
type SandboxStatus struct {
ID string
@@ -270,7 +272,6 @@ func (s *Sandbox) GetContainer(containerID string) VCContainer {
// Release closes the agent connection and removes sandbox from internal list.
func (s *Sandbox) Release() error {
s.Logger().Info("release sandbox")
globalSandboxList.removeSandbox(s.id)
if s.monitor != nil {
s.monitor.stop()
}
@@ -506,15 +507,11 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
if err = globalSandboxList.addSandbox(s); err != nil {
s.newStore.Destroy(s.id)
return nil, err
}
globalSandbox = s
defer func() {
if retErr != nil {
s.Logger().WithError(retErr).WithField("sandboxid", s.id).Error("Create new sandbox failed")
globalSandboxList.removeSandbox(s.id)
s.newStore.Destroy(s.id)
}
}()
@@ -636,50 +633,6 @@ func rwLockSandbox(sandboxID string) (func() error, error) {
return store.Lock(sandboxID, true)
}
// fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) {
virtLog.Info("fetch sandbox")
if sandboxID == "" {
return nil, vcTypes.ErrNeedSandboxID
}
sandbox, err = globalSandboxList.lookupSandbox(sandboxID)
if sandbox != nil && err == nil {
return sandbox, err
}
var config SandboxConfig
// load sandbox config from the store.
c, err := loadSandboxConfig(sandboxID)
if err != nil {
virtLog.Warningf("failed to get sandbox config from new store: %v", err)
return nil, err
}
config = *c
// fetchSandbox is not supposed to create a new sandbox VM.
sandbox, err = createSandbox(ctx, config, nil)
if err != nil {
return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err)
}
if sandbox.config.SandboxCgroupOnly {
if err := sandbox.createCgroupManager(); err != nil {
return nil, err
}
}
// This sandbox already exists, so we don't need to recreate the containers in the guest.
// We only need to fetch the containers from storage and create the container structs.
if err := sandbox.fetchContainers(); err != nil {
return nil, err
}
return sandbox, nil
}
// findContainer returns a container from the containers list held by the
// sandbox structure, based on a container ID.
func (s *Sandbox) findContainer(containerID string) (*Container, error) {
@@ -741,8 +694,6 @@ func (s *Sandbox) Delete() error {
}
}
globalSandboxList.removeSandbox(s.id)
if s.monitor != nil {
s.monitor.stop()
}
@@ -1145,33 +1096,6 @@ func (s *Sandbox) addContainer(c *Container) error {
return nil
}
// fetchContainers creates new container structures and
// adds them to the sandbox. It does not create the containers
// in the guest. This should only be used when fetching a
// sandbox that already exists.
func (s *Sandbox) fetchContainers() error {
for i, contConfig := range s.config.Containers {
// Add spec from bundle path
spec, err := compatoci.GetContainerSpec(contConfig.Annotations)
if err != nil {
return err
}
contConfig.CustomSpec = &spec
s.config.Containers[i] = contConfig
c, err := newContainer(s, &s.config.Containers[i])
if err != nil {
return err
}
if err := s.addContainer(c); err != nil {
return err
}
}
return nil
}
// CreateContainer creates a new container in the sandbox
// This should be called only when the sandbox is already created.
// It will add new container config to sandbox.config.Containers
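With fetchSandbox and fetchContainers gone, call sites that used to look a sandbox up by ID (CleanupContainer and the acrn hunks above) now read globalSandbox directly, and the hunks shown here do so without a nil guard. If a converted call site wanted to keep the old lookup-style error behaviour, a small wrapper along these lines would do it; currentSandbox is hypothetical and not part of this commit:

// currentSandbox is a hypothetical wrapper, not in this commit: the diff
// assigns globalSandbox in newSandbox and reads it directly elsewhere.
func currentSandbox(sandboxID string) (*Sandbox, error) {
    s := globalSandbox
    if s == nil {
        return nil, fmt.Errorf("sandbox %s does not exist", sandboxID)
    }
    if sandboxID != "" && s.id != sandboxID {
        return nil, fmt.Errorf("sandbox %s does not match the running sandbox %s", sandboxID, s.id)
    }
    return s, nil
}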


@@ -301,8 +301,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
}
// force state to be read from disk
p2, err := fetchSandbox(context.Background(), p.ID())
assert.NoError(err)
p2 := globalSandbox
if err := testCheckSandboxOnDiskState(p2, newSandboxState); err != nil {
t.Error(err)
@@ -1328,9 +1327,6 @@ func checkSandboxRemains() error {
if err = checkDirNotExist(path.Join(kataHostSharedDir(), testSandboxID)); err != nil {
return fmt.Errorf("%s still exists", path.Join(kataHostSharedDir(), testSandboxID))
}
if _, err = globalSandboxList.lookupSandbox(testSandboxID); err == nil {
return fmt.Errorf("globalSandboxList for %s still exists", testSandboxID)
}
return nil
}


@@ -1,49 +0,0 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"fmt"
"sync"
)
type sandboxList struct {
lock sync.RWMutex
sandboxes map[string]*Sandbox
}
// globalSandboxList tracks sandboxes globally
var globalSandboxList = &sandboxList{sandboxes: make(map[string]*Sandbox)}
func (p *sandboxList) addSandbox(sandbox *Sandbox) (err error) {
if sandbox == nil {
return nil
}
p.lock.Lock()
defer p.lock.Unlock()
if p.sandboxes[sandbox.id] == nil {
p.sandboxes[sandbox.id] = sandbox
} else {
err = fmt.Errorf("sandbox %s exists", sandbox.id)
}
return err
}
func (p *sandboxList) removeSandbox(id string) {
p.lock.Lock()
defer p.lock.Unlock()
delete(p.sandboxes, id)
}
func (p *sandboxList) lookupSandbox(id string) (*Sandbox, error) {
p.lock.RLock()
defer p.lock.RUnlock()
if p.sandboxes[id] != nil {
return p.sandboxes[id], nil
}
return nil, fmt.Errorf("sandbox %s does not exist", id)
}


@@ -1,31 +0,0 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSandboxListOperations(t *testing.T) {
p := &Sandbox{id: "testsandboxListsandbox"}
l := &sandboxList{sandboxes: make(map[string]*Sandbox)}
err := l.addSandbox(p)
assert.Nil(t, err, "addSandbox failed")
err = l.addSandbox(p)
assert.NotNil(t, err, "add same sandbox should fail")
np, err := l.lookupSandbox(p.id)
assert.Nil(t, err, "lookupSandbox failed")
assert.Equal(t, np, p, "lookupSandbox returns different sandbox %v:%v", np, p)
_, err = l.lookupSandbox("some-non-existing-sandbox-name")
assert.NotNil(t, err, "lookupSandbox for non-existing sandbox should fail")
l.removeSandbox(p.id)
}


@@ -58,7 +58,7 @@ var testHyperstartTtySocket = ""
// cleanUp Removes any stale sandbox/container state that can affect
// the next test to run.
func cleanUp() {
globalSandboxList.removeSandbox(testSandboxID)
globalSandbox = nil
os.RemoveAll(fs.MockRunStoragePath())
os.RemoveAll(fs.MockRunVMStoragePath())
syscall.Unmount(getSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)