Merge pull request #1079 from liubin/fix/1078-delete-sandboxlist

runtime: delete sandboxlist.go and sandboxlist_test.go
This commit is contained in:
Peng Tao 2020-11-13 15:02:51 +08:00 committed by GitHub
commit 3b5245fc55
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 62 additions and 333 deletions

View File

@ -312,17 +312,17 @@ func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err e
switch containerType {
case vc.PodSandbox:
err = cleanupContainer(ctx, s.id, s.id, path)
err = cleanupContainer(ctx, s.sandbox, s.id, path)
if err != nil {
return nil, err
}
case vc.PodContainer:
sandboxID, err := oci.SandboxID(ociSpec)
_, err := oci.SandboxID(ociSpec)
if err != nil {
return nil, err
}
err = cleanupContainer(ctx, sandboxID, s.id, path)
err = cleanupContainer(ctx, s.sandbox, s.id, path)
if err != nil {
return nil, err
}

View File

@ -31,10 +31,10 @@ func cReap(s *service, status int, id, execid string, exitat time.Time) {
}
}
func cleanupContainer(ctx context.Context, sid, cid, bundlePath string) error {
func cleanupContainer(ctx context.Context, sandbox vc.VCSandbox, cid, bundlePath string) error {
shimLog.WithField("service", "cleanup").WithField("container", cid).Info("Cleanup container")
err := vci.CleanupContainer(ctx, sid, cid, true)
err := vci.CleanupContainer(ctx, sandbox, cid, true)
if err != nil {
shimLog.WithError(err).WithField("container", cid).Warn("failed to cleanup container")
return err

View File

@ -88,6 +88,7 @@ type Acrn struct {
arch acrnArch
ctx context.Context
store persistapi.PersistDriver
sandbox *Sandbox
}
type acrnPlatformInfo struct {
@ -231,11 +232,9 @@ func (a *Acrn) appendImage(devices []Device, imagePath string) ([]Device, error)
// Get sandbox and increment the globalIndex.
// This is to make sure the VM rootfs occupies
// the first Index which is /dev/vda.
sandbox, err := globalSandboxList.lookupSandbox(a.id)
if sandbox == nil && err != nil {
return nil, err
}
if _, err = sandbox.GetAndSetSandboxBlockIndex(); err != nil {
var err error
if _, err = a.sandbox.GetAndSetSandboxBlockIndex(); err != nil {
return nil, err
}
@ -822,3 +821,7 @@ func (a *Acrn) loadInfo() error {
func (a *Acrn) isRateLimiterBuiltin() bool {
return false
}
func (a *Acrn) setSandbox(sandbox *Sandbox) {
a.sandbox = sandbox
}

View File

@ -230,10 +230,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
state: types.SandboxState{BlockIndexMap: make(map[int]struct{})},
}
err = globalSandboxList.addSandbox(sandbox)
assert.NoError(err)
defer globalSandboxList.removeSandbox(sandbox.id)
a.sandbox = sandbox
//set PID to 1 to ignore hypercall to get UUID and set a random UUID
a.state.PID = 1

View File

@ -7,6 +7,7 @@ package virtcontainers
import (
"context"
"fmt"
"runtime"
deviceApi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -135,29 +136,24 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
// CleanupContainer is used by shimv2 to stop and delete a container exclusively, once there is no container
// in the sandbox left, do stop the sandbox and delete it. Those serial operations will be done exclusively by
// locking the sandbox.
func CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error {
func CleanupContainer(ctx context.Context, sandbox VCSandbox, containerID string, force bool) error {
span, ctx := trace(ctx, "CleanupContainer")
defer span.Finish()
if sandboxID == "" {
return vcTypes.ErrNeedSandboxID
}
if containerID == "" {
return vcTypes.ErrNeedContainerID
}
unlock, err := rwLockSandbox(sandboxID)
s, ok := sandbox.(*Sandbox)
if !ok {
return fmt.Errorf("not a Sandbox reference")
}
unlock, err := rwLockSandbox(s.id)
if err != nil {
return err
}
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
return err
}
defer s.Release()
_, err = s.StopContainer(containerID, force)

View File

@ -307,7 +307,7 @@ func TestCleanupContainer(t *testing.T) {
}
for _, c := range p.GetAllContainers() {
CleanupContainer(ctx, p.ID(), c.ID(), true)
CleanupContainer(ctx, p, c.ID(), true)
}
s, ok := p.(*Sandbox)
@ -316,6 +316,6 @@ func TestCleanupContainer(t *testing.T) {
_, err = os.Stat(sandboxDir)
if err == nil {
t.Fatal(err)
t.Fatal("sandbox dir should be deleted")
}
}

View File

@ -1297,3 +1297,6 @@ func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
func (clh *cloudHypervisor) isRateLimiterBuiltin() bool {
return false
}
func (clh *cloudHypervisor) setSandbox(sandbox *Sandbox) {
}

View File

@ -1247,3 +1247,6 @@ func revertBytes(num uint64) uint64 {
return 1024*revertBytes(a) + b
}
}
func (fc *firecracker) setSandbox(sandbox *Sandbox) {
}

View File

@ -814,4 +814,6 @@ type hypervisor interface {
// check if hypervisor supports built-in rate limiter.
isRateLimiterBuiltin() bool
setSandbox(sandbox *Sandbox)
}

View File

@ -38,6 +38,6 @@ func (impl *VCImpl) CreateSandbox(ctx context.Context, sandboxConfig SandboxConf
// CleanupContainer is used by shimv2 to stop and delete a container exclusively, once there is no container
// in the sandbox left, do stop the sandbox and delete it. Those serial operations will be done exclusively by
// locking the sandbox.
func (impl *VCImpl) CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error {
return CleanupContainer(ctx, sandboxID, containerID, force)
func (impl *VCImpl) CleanupContainer(ctx context.Context, sandbox VCSandbox, containerID string, force bool) error {
return CleanupContainer(ctx, sandbox, containerID, force)
}

View File

@ -24,7 +24,7 @@ type VC interface {
SetFactory(ctx context.Context, factory Factory)
CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig) (VCSandbox, error)
CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error
CleanupContainer(ctx context.Context, sandbox VCSandbox, containerID string, force bool) error
}
// VCSandbox is the Sandbox interface

View File

@ -136,3 +136,6 @@ func (m *mockHypervisor) generateSocket(id string) (interface{}, error) {
func (m *mockHypervisor) isRateLimiterBuiltin() bool {
return false
}
func (m *mockHypervisor) setSandbox(sandbox *Sandbox) {
}

View File

@ -9,8 +9,6 @@ import (
"errors"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
exp "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/experimental"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
)
@ -428,117 +426,3 @@ func (c *Container) Restore() error {
c.loadContMounts(cs)
return nil
}
func loadSandboxConfig(id string) (*SandboxConfig, error) {
store, err := persist.GetDriver()
if err != nil || store == nil {
return nil, errors.New("failed to get fs persist driver")
}
ss, _, err := store.FromDisk(id)
if err != nil {
return nil, err
}
savedConf := ss.Config
sconfig := &SandboxConfig{
ID: id,
HypervisorType: HypervisorType(savedConf.HypervisorType),
NetworkConfig: NetworkConfig{
NetNSPath: savedConf.NetworkConfig.NetNSPath,
NetNsCreated: savedConf.NetworkConfig.NetNsCreated,
DisableNewNetNs: savedConf.NetworkConfig.DisableNewNetNs,
InterworkingModel: NetInterworkingModel(savedConf.NetworkConfig.InterworkingModel),
},
ShmSize: savedConf.ShmSize,
SharePidNs: savedConf.SharePidNs,
SystemdCgroup: savedConf.SystemdCgroup,
SandboxCgroupOnly: savedConf.SandboxCgroupOnly,
DisableGuestSeccomp: savedConf.DisableGuestSeccomp,
Cgroups: savedConf.Cgroups,
}
for _, name := range savedConf.Experimental {
sconfig.Experimental = append(sconfig.Experimental, *exp.Get(name))
}
hconf := savedConf.HypervisorConfig
sconfig.HypervisorConfig = HypervisorConfig{
NumVCPUs: hconf.NumVCPUs,
DefaultMaxVCPUs: hconf.DefaultMaxVCPUs,
MemorySize: hconf.MemorySize,
DefaultBridges: hconf.DefaultBridges,
Msize9p: hconf.Msize9p,
MemSlots: hconf.MemSlots,
MemOffset: hconf.MemOffset,
VirtioMem: hconf.VirtioMem,
VirtioFSCacheSize: hconf.VirtioFSCacheSize,
KernelPath: hconf.KernelPath,
ImagePath: hconf.ImagePath,
InitrdPath: hconf.InitrdPath,
FirmwarePath: hconf.FirmwarePath,
MachineAccelerators: hconf.MachineAccelerators,
CPUFeatures: hconf.CPUFeatures,
HypervisorPath: hconf.HypervisorPath,
HypervisorPathList: hconf.HypervisorPathList,
HypervisorCtlPath: hconf.HypervisorCtlPath,
HypervisorCtlPathList: hconf.HypervisorCtlPathList,
JailerPath: hconf.JailerPath,
JailerPathList: hconf.JailerPathList,
BlockDeviceDriver: hconf.BlockDeviceDriver,
HypervisorMachineType: hconf.HypervisorMachineType,
MemoryPath: hconf.MemoryPath,
DevicesStatePath: hconf.DevicesStatePath,
EntropySource: hconf.EntropySource,
SharedFS: hconf.SharedFS,
VirtioFSDaemon: hconf.VirtioFSDaemon,
VirtioFSDaemonList: hconf.VirtioFSDaemonList,
VirtioFSCache: hconf.VirtioFSCache,
VirtioFSExtraArgs: hconf.VirtioFSExtraArgs[:],
BlockDeviceCacheSet: hconf.BlockDeviceCacheSet,
BlockDeviceCacheDirect: hconf.BlockDeviceCacheDirect,
BlockDeviceCacheNoflush: hconf.BlockDeviceCacheNoflush,
DisableBlockDeviceUse: hconf.DisableBlockDeviceUse,
EnableIOThreads: hconf.EnableIOThreads,
Debug: hconf.Debug,
MemPrealloc: hconf.MemPrealloc,
HugePages: hconf.HugePages,
FileBackedMemRootDir: hconf.FileBackedMemRootDir,
FileBackedMemRootList: hconf.FileBackedMemRootList,
Realtime: hconf.Realtime,
Mlock: hconf.Mlock,
DisableNestingChecks: hconf.DisableNestingChecks,
DisableImageNvdimm: hconf.DisableImageNvdimm,
HotplugVFIOOnRootBus: hconf.HotplugVFIOOnRootBus,
PCIeRootPort: hconf.PCIeRootPort,
BootToBeTemplate: hconf.BootToBeTemplate,
BootFromTemplate: hconf.BootFromTemplate,
DisableVhostNet: hconf.DisableVhostNet,
EnableVhostUserStore: hconf.EnableVhostUserStore,
VhostUserStorePath: hconf.VhostUserStorePath,
VhostUserStorePathList: hconf.VhostUserStorePathList,
GuestHookPath: hconf.GuestHookPath,
VMid: hconf.VMid,
RxRateLimiterMaxRate: hconf.RxRateLimiterMaxRate,
TxRateLimiterMaxRate: hconf.TxRateLimiterMaxRate,
SGXEPCSize: hconf.SGXEPCSize,
EnableAnnotations: hconf.EnableAnnotations,
}
sconfig.AgentConfig = KataAgentConfig{
LongLiveConn: savedConf.KataAgentConfig.LongLiveConn,
}
for _, contConf := range savedConf.ContainerConfigs {
sconfig.Containers = append(sconfig.Containers, ContainerConfig{
ID: contConf.ID,
Annotations: contConf.Annotations,
Resources: contConf.Resources,
RootFs: RootFs{
Target: contConf.RootFs,
},
})
}
return sconfig, nil
}

View File

@ -18,6 +18,7 @@ package vcmock
import (
"context"
"fmt"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/sirupsen/logrus"
)
@ -49,9 +50,9 @@ func (m *VCMock) CreateSandbox(ctx context.Context, sandboxConfig vc.SandboxConf
return nil, fmt.Errorf("%s: %s (%+v): sandboxConfig: %v", mockErrorPrefix, getSelf(), m, sandboxConfig)
}
func (m *VCMock) CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error {
func (m *VCMock) CleanupContainer(ctx context.Context, sandbox vc.VCSandbox, containerID string, force bool) error {
if m.CleanupContainerFunc != nil {
return m.CleanupContainerFunc(ctx, sandboxID, containerID, true)
return m.CleanupContainerFunc(ctx, sandbox, containerID, true)
}
return fmt.Errorf("%s: %s (%+v): sandboxID: %v", mockErrorPrefix, getSelf(), m, sandboxID)
return fmt.Errorf("%s: %s (%+v): sandbox: %v", mockErrorPrefix, getSelf(), m, sandbox)
}

View File

@ -17,13 +17,13 @@ import (
)
const (
testSandboxID = "testSandboxID"
testContainerID = "testContainerID"
)
var (
loggerTriggered = 0
factoryTriggered = 0
loggerTriggered = 0
factoryTriggered = 0
testSandbox vc.VCSandbox = &Sandbox{}
)
func TestVCImplementations(t *testing.T) {
@ -178,21 +178,21 @@ func TestVCMockCleanupContainer(t *testing.T) {
assert.Nil(m.CleanupContainerFunc)
ctx := context.Background()
err := m.CleanupContainer(ctx, testSandboxID, testContainerID, false)
err := m.CleanupContainer(ctx, testSandbox, testContainerID, false)
assert.Error(err)
assert.True(IsMockError(err))
m.CleanupContainerFunc = func(ctx context.Context, sandboxID, containerID string, force bool) error {
m.CleanupContainerFunc = func(ctx context.Context, sandbox vc.VCSandbox, containerID string, force bool) error {
return nil
}
err = m.CleanupContainer(ctx, testSandboxID, testContainerID, false)
err = m.CleanupContainer(ctx, testSandbox, testContainerID, false)
assert.NoError(err)
// reset
m.CleanupContainerFunc = nil
err = m.CleanupContainer(ctx, testSandboxID, testContainerID, false)
err = m.CleanupContainer(ctx, testSandbox, testContainerID, false)
assert.Error(err)
assert.True(IsMockError(err))
}
@ -204,21 +204,21 @@ func TestVCMockForceCleanupContainer(t *testing.T) {
assert.Nil(m.CleanupContainerFunc)
ctx := context.Background()
err := m.CleanupContainer(ctx, testSandboxID, testContainerID, true)
err := m.CleanupContainer(ctx, testSandbox, testContainerID, true)
assert.Error(err)
assert.True(IsMockError(err))
m.CleanupContainerFunc = func(ctx context.Context, sandboxID, containerID string, force bool) error {
m.CleanupContainerFunc = func(ctx context.Context, sandbox vc.VCSandbox, containerID string, force bool) error {
return nil
}
err = m.CleanupContainer(ctx, testSandboxID, testContainerID, true)
err = m.CleanupContainer(ctx, testSandbox, testContainerID, true)
assert.NoError(err)
// reset
m.CleanupContainerFunc = nil
err = m.CleanupContainer(ctx, testSandboxID, testContainerID, true)
err = m.CleanupContainer(ctx, testSandbox, testContainerID, true)
assert.Error(err)
assert.True(IsMockError(err))
}

View File

@ -87,6 +87,6 @@ type VCMock struct {
SetLoggerFunc func(ctx context.Context, logger *logrus.Entry)
SetFactoryFunc func(ctx context.Context, factory vc.Factory)
CreateSandboxFunc func(ctx context.Context, sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error)
CleanupContainerFunc func(ctx context.Context, sandboxID, containerID string, force bool) error
CreateSandboxFunc func(ctx context.Context, sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error)
CleanupContainerFunc func(ctx context.Context, sandbox vc.VCSandbox, containerID string, force bool) error
}

View File

@ -2389,3 +2389,6 @@ func (q *qemu) generateSocket(id string) (interface{}, error) {
func (q *qemu) isRateLimiterBuiltin() bool {
return false
}
func (q *qemu) setSandbox(sandbox *Sandbox) {
}

View File

@ -38,7 +38,6 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cpuset"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
@ -270,7 +269,6 @@ func (s *Sandbox) GetContainer(containerID string) VCContainer {
// Release closes the agent connection and removes sandbox from internal list.
func (s *Sandbox) Release() error {
s.Logger().Info("release sandbox")
globalSandboxList.removeSandbox(s.id)
if s.monitor != nil {
s.monitor.stop()
}
@ -502,19 +500,15 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
ctx: ctx,
}
hypervisor.setSandbox(s)
if s.newStore, err = persist.GetDriver(); err != nil || s.newStore == nil {
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
if err = globalSandboxList.addSandbox(s); err != nil {
s.newStore.Destroy(s.id)
return nil, err
}
defer func() {
if retErr != nil {
s.Logger().WithError(retErr).WithField("sandboxid", s.id).Error("Create new sandbox failed")
globalSandboxList.removeSandbox(s.id)
s.newStore.Destroy(s.id)
}
}()
@ -636,50 +630,6 @@ func rwLockSandbox(sandboxID string) (func() error, error) {
return store.Lock(sandboxID, true)
}
// fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) {
virtLog.Info("fetch sandbox")
if sandboxID == "" {
return nil, vcTypes.ErrNeedSandboxID
}
sandbox, err = globalSandboxList.lookupSandbox(sandboxID)
if sandbox != nil && err == nil {
return sandbox, err
}
var config SandboxConfig
// load sandbox config from the old store.
c, err := loadSandboxConfig(sandboxID)
if err != nil {
virtLog.Warningf("failed to get sandbox config from new store: %v", err)
return nil, err
}
config = *c
// fetchSandbox is not supposed to create a new sandbox VM.
sandbox, err = createSandbox(ctx, config, nil)
if err != nil {
return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err)
}
if sandbox.config.SandboxCgroupOnly {
if err := sandbox.createCgroupManager(); err != nil {
return nil, err
}
}
// This sandbox already exists, we don't need to recreate the containers in the guest.
// We only need to fetch the containers from storage and create the container structs.
if err := sandbox.fetchContainers(); err != nil {
return nil, err
}
return sandbox, nil
}
// findContainer returns a container from the containers list held by the
// sandbox structure, based on a container ID.
func (s *Sandbox) findContainer(containerID string) (*Container, error) {
@ -741,8 +691,6 @@ func (s *Sandbox) Delete() error {
}
}
globalSandboxList.removeSandbox(s.id)
if s.monitor != nil {
s.monitor.stop()
}
@ -1145,33 +1093,6 @@ func (s *Sandbox) addContainer(c *Container) error {
return nil
}
// newContainers creates new containers structure and
// adds them to the sandbox. It does not create the containers
// in the guest. This should only be used when fetching a
// sandbox that already exists.
func (s *Sandbox) fetchContainers() error {
for i, contConfig := range s.config.Containers {
// Add spec from bundle path
spec, err := compatoci.GetContainerSpec(contConfig.Annotations)
if err != nil {
return err
}
contConfig.CustomSpec = &spec
s.config.Containers[i] = contConfig
c, err := newContainer(s, &s.config.Containers[i])
if err != nil {
return err
}
if err := s.addContainer(c); err != nil {
return err
}
}
return nil
}
// CreateContainer creates a new container in the sandbox
// This should be called only when the sandbox is already created.
// It will add new container config to sandbox.config.Containers

View File

@ -301,14 +301,11 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
}
// force state to be read from disk
p2, err := fetchSandbox(context.Background(), p.ID())
assert.NoError(err)
if err := testCheckSandboxOnDiskState(p2, newSandboxState); err != nil {
if err := testCheckSandboxOnDiskState(p, newSandboxState); err != nil {
t.Error(err)
}
c2, err := p2.findContainer(contID)
c2, err := p.findContainer(contID)
assert.NoError(err)
if err := testCheckContainerOnDiskState(c2, newContainerState); err != nil {
@ -1328,9 +1325,6 @@ func checkSandboxRemains() error {
if err = checkDirNotExist(path.Join(kataHostSharedDir(), testSandboxID)); err != nil {
return fmt.Errorf("%s still exists", path.Join(kataHostSharedDir(), testSandboxID))
}
if _, err = globalSandboxList.lookupSandbox(testSandboxID); err == nil {
return fmt.Errorf("globalSandboxList for %s stil exists", testSandboxID)
}
return nil
}

View File

@ -1,49 +0,0 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"fmt"
"sync"
)
type sandboxList struct {
lock sync.RWMutex
sandboxes map[string]*Sandbox
}
// globalSandboxList tracks sandboxes globally
var globalSandboxList = &sandboxList{sandboxes: make(map[string]*Sandbox)}
func (p *sandboxList) addSandbox(sandbox *Sandbox) (err error) {
if sandbox == nil {
return nil
}
p.lock.Lock()
defer p.lock.Unlock()
if p.sandboxes[sandbox.id] == nil {
p.sandboxes[sandbox.id] = sandbox
} else {
err = fmt.Errorf("sandbox %s exists", sandbox.id)
}
return err
}
func (p *sandboxList) removeSandbox(id string) {
p.lock.Lock()
defer p.lock.Unlock()
delete(p.sandboxes, id)
}
func (p *sandboxList) lookupSandbox(id string) (*Sandbox, error) {
p.lock.RLock()
defer p.lock.RUnlock()
if p.sandboxes[id] != nil {
return p.sandboxes[id], nil
}
return nil, fmt.Errorf("sandbox %s does not exist", id)
}

View File

@ -1,31 +0,0 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSandboxListOperations(t *testing.T) {
p := &Sandbox{id: "testsandboxListsandbox"}
l := &sandboxList{sandboxes: make(map[string]*Sandbox)}
err := l.addSandbox(p)
assert.Nil(t, err, "addSandbox failed")
err = l.addSandbox(p)
assert.NotNil(t, err, "add same sandbox should fail")
np, err := l.lookupSandbox(p.id)
assert.Nil(t, err, "lookupSandbox failed")
assert.Equal(t, np, p, "lookupSandbox returns different sandbox %v:%v", np, p)
_, err = l.lookupSandbox("some-non-existing-sandbox-name")
assert.NotNil(t, err, "lookupSandbox for non-existing sandbox should fail")
l.removeSandbox(p.id)
}

View File

@ -58,7 +58,6 @@ var testHyperstartTtySocket = ""
// cleanUp Removes any stale sandbox/container state that can affect
// the next test to run.
func cleanUp() {
globalSandboxList.removeSandbox(testSandboxID)
os.RemoveAll(fs.MockRunStoragePath())
os.RemoveAll(fs.MockRunVMStoragePath())
syscall.Unmount(getSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)