virtcontainers: Conversion to Stores
We convert the whole virtcontainers code to use the store package instead of the resource_storage one. The resource_storage removal will happen in a separate change, for a more logical split.

This change is fairly big but mostly does not change the code logic. What really changes is when we create a store for a container or a sandbox: we now need to explicitly do so, instead of just assigning a filesystem{} instance. Other than that, the logic is kept intact.

Fixes: #1099

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
This commit is contained in:
parent 2ecffda170
commit fad23ea54e
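The heart of the conversion, for readers skimming the diff below: a sandbox or container store is now created explicitly with store.NewVCSandboxStore() / store.NewVCContainerStore(), and typed items are persisted and retrieved through Store() and Load(), instead of implicitly going through a filesystem{} resource_storage instance. A minimal sketch of the new calling pattern, using only APIs that appear in this diff; the wrapper function and the interface{} stand-ins for the persisted state types are illustrative, not part of the commit:

	import (
		"context"

		"github.com/kata-containers/runtime/virtcontainers/store"
	)

	// exampleStoreUsage is a hypothetical helper showing the explicit
	// store creation this commit introduces.
	func exampleStoreUsage(ctx context.Context, sandboxID, containerID string) error {
		// Previously implicit: s.storage = &filesystem{}.
		// Now the per-sandbox store must be created explicitly.
		sandboxStore, err := store.NewVCSandboxStore(ctx, sandboxID)
		if err != nil {
			return err
		}

		// Items are persisted by kind, e.g. agent or hypervisor state.
		var agentState interface{} // stand-in for the real agent state type
		if err := sandboxStore.Store(store.Agent, agentState); err != nil {
			return err
		}

		// Each container gets its own store, scoped by both IDs.
		containerStore, err := store.NewVCContainerStore(ctx, sandboxID, containerID)
		if err != nil {
			return err
		}

		// Load is the symmetric read path.
		var process interface{} // stand-in for this package's Process type
		return containerStore.Load(store.Process, &process)
	}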
@@ -14,6 +14,7 @@ import (
 	deviceApi "github.com/kata-containers/runtime/virtcontainers/device/api"
 	deviceConfig "github.com/kata-containers/runtime/virtcontainers/device/config"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	opentracing "github.com/opentracing/opentracing-go"
@@ -142,7 +143,7 @@ func DeleteSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	// Fetch the sandbox from storage and create it.
 	s, err := fetchSandbox(ctx, sandboxID)
@@ -175,7 +176,7 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	// Fetch the sandbox from storage and create it.
 	s, err := fetchSandbox(ctx, sandboxID)
@@ -212,7 +213,7 @@ func StartSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	// Fetch the sandbox from storage and create it.
 	s, err := fetchSandbox(ctx, sandboxID)
@@ -244,7 +245,7 @@ func StopSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	// Fetch the sandbox from storage and create it.
 	s, err := fetchSandbox(ctx, sandboxID)
@@ -279,7 +280,7 @@ func RunSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(s.id, lockFile)
 
 	// Start the sandbox
 	err = s.Start()
@@ -295,7 +296,7 @@ func ListSandbox(ctx context.Context) ([]SandboxStatus, error) {
 	span, ctx := trace(ctx, "ListSandbox")
 	defer span.Finish()
 
-	dir, err := os.Open(configStoragePath)
+	dir, err := os.Open(store.ConfigStoragePath)
 	if err != nil {
 		if os.IsNotExist(err) {
 			// No sandbox directory is not an error
@@ -338,11 +339,11 @@ func StatusSandbox(ctx context.Context, sandboxID string) (SandboxStatus, error)
 	if err != nil {
 		return SandboxStatus{}, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
-		unlockSandbox(lockFile)
+		unlockSandbox(sandboxID, lockFile)
 		return SandboxStatus{}, err
 	}
 	defer s.releaseStatelessSandbox()
@@ -384,7 +385,7 @@ func CreateContainer(ctx context.Context, sandboxID string, containerConfig Cont
 	if err != nil {
 		return nil, nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -419,7 +420,7 @@ func DeleteContainer(ctx context.Context, sandboxID, containerID string) (VCCont
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -448,7 +449,7 @@ func StartContainer(ctx context.Context, sandboxID, containerID string) (VCConta
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -477,7 +478,7 @@ func StopContainer(ctx context.Context, sandboxID, containerID string) (VCContai
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -506,7 +507,7 @@ func EnterContainer(ctx context.Context, sandboxID, containerID string, cmd type
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -540,11 +541,11 @@ func StatusContainer(ctx context.Context, sandboxID, containerID string) (Contai
 	if err != nil {
 		return ContainerStatus{}, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
-		unlockSandbox(lockFile)
+		unlockSandbox(sandboxID, lockFile)
 		return ContainerStatus{}, err
 	}
 	defer s.releaseStatelessSandbox()
@@ -621,7 +622,7 @@ func KillContainer(ctx context.Context, sandboxID, containerID string, signal sy
 	if err != nil {
 		return err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -668,7 +669,7 @@ func ProcessListContainer(ctx context.Context, sandboxID, containerID string, op
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -697,7 +698,7 @@ func UpdateContainer(ctx context.Context, sandboxID, containerID string, resourc
 	if err != nil {
 		return err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -726,7 +727,7 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain
 		return ContainerStats{}, err
 	}
 
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -750,7 +751,7 @@ func togglePauseContainer(ctx context.Context, sandboxID, containerID string, pa
 	if err != nil {
 		return err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -794,7 +795,7 @@ func AddDevice(ctx context.Context, sandboxID string, info deviceConfig.DeviceIn
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -814,7 +815,7 @@ func toggleInterface(ctx context.Context, sandboxID string, inf *vcTypes.Interfa
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -858,7 +859,7 @@ func ListInterfaces(ctx context.Context, sandboxID string) ([]*vcTypes.Interface
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -882,7 +883,7 @@ func UpdateRoutes(ctx context.Context, sandboxID string, routes []*vcTypes.Route
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -906,7 +907,7 @@ func ListRoutes(ctx context.Context, sandboxID string) ([]*vcTypes.Route, error)
 	if err != nil {
 		return nil, err
 	}
-	defer unlockSandbox(lockFile)
+	defer unlockSandbox(sandboxID, lockFile)
 
 	s, err := fetchSandbox(ctx, sandboxID)
 	if err != nil {
@@ -20,6 +20,7 @@ import (
 	"github.com/containernetworking/plugins/pkg/ns"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/stretchr/testify/assert"
@@ -195,7 +196,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -240,7 +241,7 @@ func TestCreateSandboxHyperstartAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -281,7 +282,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -310,7 +311,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -355,7 +356,7 @@ func TestDeleteSandboxHyperstartAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -407,7 +408,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -427,7 +428,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) {
 func TestDeleteSandboxFailing(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
+	sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
 	os.Remove(sandboxDir)
 
 	p, err := DeleteSandbox(context.Background(), testSandboxID)
@@ -527,7 +528,7 @@ func TestStartSandboxKataAgentSuccessful(t *testing.T) {
 func TestStartSandboxFailing(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
+	sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
 	os.Remove(sandboxDir)
 
 	p, err := StartSandbox(context.Background(), testSandboxID)
@@ -694,7 +695,7 @@ func TestStopSandboxKataAgentSuccessful(t *testing.T) {
 func TestStopSandboxFailing(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
+	sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
 	os.Remove(sandboxDir)
 
 	p, err := StopSandbox(context.Background(), testSandboxID)
@@ -713,7 +714,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -750,7 +751,7 @@ func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -797,7 +798,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -823,8 +824,6 @@ func TestRunSandboxFailing(t *testing.T) {
 func TestListSandboxSuccessful(t *testing.T) {
 	cleanUp()
 
-	os.RemoveAll(configStoragePath)
-
 	config := newTestSandboxConfigNoop()
 
 	ctx := context.Background()
@@ -842,8 +841,6 @@ func TestListSandboxSuccessful(t *testing.T) {
 func TestListSandboxNoSandboxDirectory(t *testing.T) {
 	cleanUp()
 
-	os.RemoveAll(configStoragePath)
-
 	_, err := ListSandbox(context.Background())
 	if err != nil {
 		t.Fatal(fmt.Sprintf("unexpected ListSandbox error from non-existent sandbox directory: %v", err))
@@ -982,8 +979,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	path := filepath.Join(configStoragePath, p.ID())
-	os.RemoveAll(path)
+	store.DeleteAll()
 	globalSandboxList.removeSandbox(p.ID())
 
 	_, err = StatusSandbox(ctx, p.ID())
@@ -1003,10 +999,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	pImpl, ok := p.(*Sandbox)
-	assert.True(t, ok)
-
-	os.RemoveAll(pImpl.configPath)
+	store.DeleteAll()
 	globalSandboxList.removeSandbox(p.ID())
 
 	_, err = StatusSandbox(ctx, p.ID())
@@ -1039,7 +1032,7 @@ func TestCreateContainerSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1076,7 +1069,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err == nil {
 		t.Fatal()
@@ -1102,7 +1095,7 @@ func TestDeleteContainerSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1135,10 +1128,7 @@ func TestDeleteContainerSuccessful(t *testing.T) {
 func TestDeleteContainerFailingNoSandbox(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
 	contID := "100"
-	os.RemoveAll(sandboxDir)
-
 	c, err := DeleteContainer(context.Background(), testSandboxID, contID)
 	if c != nil || err == nil {
 		t.Fatal()
@@ -1157,7 +1147,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1203,10 +1193,7 @@ func TestStartContainerNoopAgentSuccessful(t *testing.T) {
 func TestStartContainerFailingNoSandbox(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
 	contID := "100"
-	os.RemoveAll(sandboxDir)
-
 	c, err := StartContainer(context.Background(), testSandboxID, contID)
 	if c != nil || err == nil {
 		t.Fatal()
@@ -1225,7 +1212,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1249,7 +1236,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1426,10 +1413,7 @@ func TestStartStopSandboxHyperstartAgentSuccessfulWithDefaultNetwork(t *testing.
 func TestStopContainerFailingNoSandbox(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
 	contID := "100"
-	os.RemoveAll(sandboxDir)
-
 	c, err := StopContainer(context.Background(), testSandboxID, contID)
 	if c != nil || err == nil {
 		t.Fatal()
@@ -1448,7 +1432,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1611,11 +1595,7 @@ func TestEnterContainerHyperstartAgentSuccessful(t *testing.T) {
 
 func TestEnterContainerFailingNoSandbox(t *testing.T) {
 	cleanUp()
 
-	sandboxDir := filepath.Join(configStoragePath, testSandboxID)
 	contID := "100"
-	os.RemoveAll(sandboxDir)
-
 	cmd := newBasicTestCmd()
 
 	_, c, _, err := EnterContainer(context.Background(), testSandboxID, contID, cmd)
@@ -1636,7 +1616,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1696,7 +1676,7 @@ func TestStatusContainerSuccessful(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1748,7 +1728,7 @@ func TestStatusContainerStateReady(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1818,7 +1798,7 @@ func TestStatusContainerStateRunning(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sandboxDir := filepath.Join(configStoragePath, p.ID())
+	sandboxDir := store.SandboxConfigurationRootPath(p.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		t.Fatal(err)
@@ -1887,10 +1867,7 @@ func TestStatusContainerFailing(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	pImpl, ok := p.(*Sandbox)
-	assert.True(t, ok)
-
-	os.RemoveAll(pImpl.configPath)
+	store.DeleteAll()
 	globalSandboxList.removeSandbox(p.ID())
 
 	_, err = StatusContainer(ctx, p.ID(), contID)
@@ -1911,10 +1888,7 @@ func TestStatsContainerFailing(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	pImpl, ok := p.(*Sandbox)
-	assert.True(t, ok)
-
-	os.RemoveAll(pImpl.configPath)
+	store.DeleteAll()
 	globalSandboxList.removeSandbox(p.ID())
 
 	_, err = StatsContainer(ctx, p.ID(), contID)
@@ -1951,7 +1925,7 @@ func TestStatsContainer(t *testing.T) {
 
 	pImpl, ok := p.(*Sandbox)
 	assert.True(ok)
-	defer os.RemoveAll(pImpl.configPath)
+	defer store.DeleteAll()
 
 	contConfig := newTestContainerConfigNoop(contID)
 	_, c, err := CreateContainer(ctx, p.ID(), contConfig)
@@ -1997,7 +1971,7 @@ func TestProcessListContainer(t *testing.T) {
 
 	pImpl, ok := p.(*Sandbox)
 	assert.True(ok)
-	defer os.RemoveAll(pImpl.configPath)
+	defer store.DeleteAll()
 
 	contConfig := newTestContainerConfigNoop(contID)
 	_, c, err := CreateContainer(ctx, p.ID(), contConfig)
@@ -2087,7 +2061,7 @@ func createAndStartSandbox(ctx context.Context, config SandboxConfig) (sandbox V
 		return nil, "", err
 	}
 
-	sandboxDir = filepath.Join(configStoragePath, sandbox.ID())
+	sandboxDir = store.SandboxConfigurationRootPath(sandbox.ID())
 	_, err = os.Stat(sandboxDir)
 	if err != nil {
 		return nil, "", err
@@ -26,6 +26,7 @@ import (
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/runtime/virtcontainers/device/manager"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 )
 
 // https://github.com/torvalds/linux/blob/master/include/uapi/linux/major.h
@@ -285,6 +286,8 @@ type Container struct {
 	systemMountsInfo SystemMountsInfo
 
 	ctx context.Context
+
+	store *store.VCStore
 }
 
 // ID returns the container identifier string.
@@ -343,23 +346,13 @@ func (c *Container) SetPid(pid int) error {
 func (c *Container) setStateBlockIndex(index int) error {
 	c.state.BlockIndex = index
 
-	err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return c.storeState()
 }
 
 func (c *Container) setStateFstype(fstype string) error {
 	c.state.Fstype = fstype
 
-	err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return c.storeState()
 }
 
 // GetAnnotations returns container's annotations
@@ -367,35 +360,44 @@ func (c *Container) GetAnnotations() map[string]string {
 	return c.config.Annotations
 }
 
+// storeContainer stores a container config.
+func (c *Container) storeContainer() error {
+	return c.store.Store(store.Configuration, *(c.config))
+}
+
 func (c *Container) storeProcess() error {
-	return c.sandbox.storage.storeContainerProcess(c.sandboxID, c.id, c.process)
+	return c.store.Store(store.Process, c.process)
 }
 
 func (c *Container) storeMounts() error {
-	return c.sandbox.storage.storeContainerMounts(c.sandboxID, c.id, c.mounts)
-}
-
-func (c *Container) fetchMounts() ([]Mount, error) {
-	return c.sandbox.storage.fetchContainerMounts(c.sandboxID, c.id)
+	return c.store.Store(store.Mounts, c.mounts)
 }
 
 func (c *Container) storeDevices() error {
-	return c.sandbox.storage.storeContainerDevices(c.sandboxID, c.id, c.devices)
+	return c.store.Store(store.DeviceIDs, c.devices)
 }
 
-func (c *Container) fetchDevices() ([]ContainerDevice, error) {
-	return c.sandbox.storage.fetchContainerDevices(c.sandboxID, c.id)
+func (c *Container) storeState() error {
+	return c.store.Store(store.State, c.state)
 }
 
-// storeContainer stores a container config.
-func (c *Container) storeContainer() error {
-	fs := filesystem{}
-	err := fs.storeContainerResource(c.sandbox.id, c.id, configFileType, *(c.config))
-	if err != nil {
-		return err
+func (c *Container) loadMounts() ([]Mount, error) {
+	var mounts []Mount
+	if err := c.store.Load(store.Mounts, &mounts); err != nil {
+		return []Mount{}, err
 	}
 
-	return nil
+	return mounts, nil
+}
+
+func (c *Container) loadDevices() ([]ContainerDevice, error) {
+	var devices []ContainerDevice
+
+	if err := c.store.Load(store.DeviceIDs, &devices); err != nil {
+		return []ContainerDevice{}, err
+	}
+
+	return devices, nil
 }
 
 // setContainerState sets both the in-memory and on-disk state of the
@@ -410,7 +412,7 @@ func (c *Container) setContainerState(state types.StateString) error {
 	c.state.State = state
 
 	// update on-disk state
-	err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
+	err := c.store.Store(store.State, c.state)
 	if err != nil {
 		return err
 	}
@@ -418,21 +420,6 @@ func (c *Container) setContainerState(state types.StateString) error {
 	return nil
 }
 
-func (c *Container) createContainersDirs() error {
-	err := os.MkdirAll(c.runPath, dirMode)
-	if err != nil {
-		return err
-	}
-
-	err = os.MkdirAll(c.configPath, dirMode)
-	if err != nil {
-		c.sandbox.storage.deleteContainerResources(c.sandboxID, c.id, nil)
-		return err
-	}
-
-	return nil
-}
-
 func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir string) (string, bool, error) {
 	randBytes, err := utils.GenerateRandomBytes(8)
 	if err != nil {
@@ -612,8 +599,8 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
 		rootFs:        contConfig.RootFs,
 		config:        &contConfig,
 		sandbox:       sandbox,
-		runPath:       filepath.Join(runStoragePath, sandbox.id, contConfig.ID),
-		configPath:    filepath.Join(configStoragePath, sandbox.id, contConfig.ID),
+		runPath:       store.ContainerRuntimeRootPath(sandbox.id, contConfig.ID),
+		configPath:    store.ContainerConfigurationRootPath(sandbox.id, contConfig.ID),
 		containerPath: filepath.Join(sandbox.id, contConfig.ID),
 		state:         types.State{},
 		process:       Process{},
@@ -621,17 +608,24 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
 		ctx:           sandbox.ctx,
 	}
 
-	state, err := c.sandbox.storage.fetchContainerState(c.sandboxID, c.id)
+	ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id)
+	if err != nil {
+		return nil, err
+	}
+
+	c.store = ctrStore
+
+	state, err := c.store.LoadState()
 	if err == nil {
 		c.state = state
 	}
 
-	process, err := c.sandbox.storage.fetchContainerProcess(c.sandboxID, c.id)
-	if err == nil {
+	var process Process
+	if err := c.store.Load(store.Process, &process); err == nil {
 		c.process = process
 	}
 
-	mounts, err := c.fetchMounts()
+	mounts, err := c.loadMounts()
 	if err == nil {
 		// restore mounts from disk
 		c.mounts = mounts
@@ -671,8 +665,8 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
 	}
 
 	// Devices will be found in storage after create stage has completed.
-	// We fetch devices from storage at all other stages.
-	storedDevices, err := c.fetchDevices()
+	// We load devices from storage at all other stages.
+	storedDevices, err := c.loadDevices()
 	if err == nil {
 		c.devices = storedDevices
 	} else {
@@ -724,11 +718,6 @@ func (c *Container) checkBlockDeviceSupport() bool {
 // createContainer creates and start a container inside a Sandbox. It has to be
 // called only when a new container, not known by the sandbox, has to be created.
 func (c *Container) create() (err error) {
-
-	if err = c.createContainersDirs(); err != nil {
-		return
-	}
-
 	// In case the container creation fails, the following takes care
 	// of rolling back all the actions previously performed.
 	defer func() {
@@ -791,7 +780,7 @@ func (c *Container) delete() error {
 		return err
 	}
 
-	return c.sandbox.storage.deleteContainerResources(c.sandboxID, c.id, nil)
+	return c.store.Delete()
 }
 
 // checkSandboxRunning validates the container state.
@@ -20,6 +20,7 @@ import (
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/runtime/virtcontainers/device/drivers"
 	"github.com/kata-containers/runtime/virtcontainers/device/manager"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/stretchr/testify/assert"
 )
@@ -89,18 +90,23 @@ func TestContainerSandbox(t *testing.T) {
 
 func TestContainerRemoveDrive(t *testing.T) {
 	sandbox := &Sandbox{
+		ctx:        context.Background(),
 		id:         "sandbox",
 		devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
-		storage:    &filesystem{},
 	}
 
+	vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
+	assert.Nil(t, err)
+
+	sandbox.store = vcStore
+
 	container := Container{
 		sandbox: sandbox,
 		id:      "testContainer",
 	}
 
 	container.state.Fstype = ""
-	err := container.removeDrive()
+	err = container.removeDrive()
 
 	// hotplugRemoveDevice for hypervisor should not be called.
 	// test should pass without a hypervisor created for the container's sandbox.
@@ -121,11 +127,6 @@ func TestContainerRemoveDrive(t *testing.T) {
 	assert.True(t, ok)
 	err = device.Attach(devReceiver)
 	assert.Nil(t, err)
-	err = sandbox.storage.createAllResources(context.Background(), sandbox)
-	if err != nil {
-		t.Fatal(err)
-	}
 
 	err = sandbox.storeSandboxDevices()
 	assert.Nil(t, err)
@@ -170,7 +171,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err
 	}
 
 	mntDir = filepath.Join(tmpDir, "rootfs")
-	err = os.Mkdir(mntDir, dirMode)
+	err = os.Mkdir(mntDir, store.DirMode)
 	if err != nil {
 		t.Fatalf("Error creating dir %s: %s", mntDir, err)
 	}
@@ -212,11 +213,10 @@ func TestContainerAddDriveDir(t *testing.T) {
 		t.Fatalf("Error while setting up fake rootfs: %v, Skipping test", err)
 	}
 
-	fs := &filesystem{}
 	sandbox := &Sandbox{
+		ctx:        context.Background(),
 		id:         testSandboxID,
 		devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
-		storage:    fs,
 		hypervisor: &mockHypervisor{},
 		agent:      &noopAgent{},
 		config: &SandboxConfig{
@@ -226,6 +226,12 @@ func TestContainerAddDriveDir(t *testing.T) {
 		},
 	}
 
+	defer store.DeleteAll()
+
+	sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
+	assert.Nil(t, err)
+	sandbox.store = sandboxStore
+
 	contID := "100"
 	container := Container{
 		sandbox: sandbox,
@@ -233,23 +239,19 @@ func TestContainerAddDriveDir(t *testing.T) {
 		rootFs:  fakeRootfs,
 	}
 
+	containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id)
+	assert.Nil(t, err)
+	container.store = containerStore
+
 	// create state file
-	path := filepath.Join(runStoragePath, testSandboxID, container.ID())
-	err = os.MkdirAll(path, dirMode)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	defer os.RemoveAll(path)
-
-	stateFilePath := filepath.Join(path, stateFile)
+	path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
+	stateFilePath := filepath.Join(path, store.StateFile)
+	os.Remove(stateFilePath)
 
 	_, err = os.Create(stateFilePath)
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer os.Remove(stateFilePath)
 
 	// Make the checkStorageDriver func variable point to a fake check function
 	savedFunc := checkStorageDriver
@@ -16,6 +16,7 @@ import (
 	vc "github.com/kata-containers/runtime/virtcontainers"
 	"github.com/kata-containers/runtime/virtcontainers/factory/base"
 	"github.com/kata-containers/runtime/virtcontainers/factory/direct"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 )
 
 type template struct {
@@ -29,7 +30,7 @@ var templateWaitForAgent = 2 * time.Second
 // Fetch finds and returns a pre-built template factory.
 // TODO: save template metadata and fetch from storage.
 func Fetch(config vc.VMConfig) (base.FactoryBase, error) {
-	statePath := vc.RunVMStoragePath + "/template"
+	statePath := store.RunVMStoragePath + "/template"
 	t := &template{statePath, config}
 
 	err := t.checkTemplateVM()
@@ -42,7 +43,7 @@ func Fetch(config vc.VMConfig) (base.FactoryBase, error) {
 
 // New creates a new VM template factory.
 func New(ctx context.Context, config vc.VMConfig) base.FactoryBase {
-	statePath := vc.RunVMStoragePath + "/template"
+	statePath := store.RunVMStoragePath + "/template"
 	t := &template{statePath, config}
 
 	err := t.prepareTemplateFiles()
@@ -8,6 +8,7 @@ package virtcontainers
 import (
 	"context"
 	"fmt"
+	"net/url"
 	"os/exec"
 	"path/filepath"
 
@@ -21,6 +22,7 @@ import (
 	"github.com/sirupsen/logrus"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 
 	"net"
@@ -97,7 +99,7 @@ type firecracker struct {
 	fcClient   *client.Firecracker //Tracks the current active connection
 	socketPath string
 
-	storage        resourceStorage
+	store          *store.VCStore
 	config         HypervisorConfig
 	pendingDevices []firecrackerDevice // Devices to be added when the FC API is ready
 	ctx            context.Context
@@ -129,7 +131,7 @@ func (fc *firecracker) trace(name string) (opentracing.Span, context.Context) {
 
 // For firecracker this call only sets the internal structure up.
 // The sandbox will be created and started through startSandbox().
-func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
+func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
 	fc.ctx = ctx
 
 	span, _ := fc.trace("createSandbox")
@@ -138,14 +140,14 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorC
 	//TODO: check validity of the hypervisor config provided
 	//https://github.com/kata-containers/runtime/issues/1065
 	fc.id = id
-	fc.socketPath = filepath.Join(runStoragePath, fc.id, fireSocket)
-	fc.storage = storage
+	fc.socketPath = filepath.Join(store.SandboxRuntimeRootPath(fc.id), fireSocket)
+	fc.store = vcStore
	fc.config = *hypervisorConfig
 	fc.state.set(notReady)
 
 	// No need to return an error from there since there might be nothing
 	// to fetch if this is the first time the hypervisor is created.
-	if err := fc.storage.fetchHypervisorState(fc.id, &fc.info); err != nil {
+	if err := fc.store.Load(store.Hypervisor, &fc.info); err != nil {
 		fc.Logger().WithField("function", "init").WithError(err).Info("No info could be fetched")
 	}
 
@@ -246,7 +248,7 @@ func (fc *firecracker) fcInit(timeout int) error {
 	fc.state.set(apiReady)
 
 	// Store VMM information
-	return fc.storage.storeHypervisorState(fc.id, fc.info)
+	return fc.store.Store(store.Hypervisor, fc.info)
 }
 
 func (fc *firecracker) client() *client.Firecracker {
@@ -395,7 +397,13 @@ func (fc *firecracker) createDiskPool() error {
 		isRootDevice := false
 
 		// Create a temporary file as a placeholder backend for the drive
-		hostPath, err := fc.storage.createSandboxTempFile(fc.id)
+		hostURL, err := fc.store.Raw("")
+		if err != nil {
+			return err
+		}
+
+		// We get a full URL from Raw(), we need to parse it.
+		u, err := url.Parse(hostURL)
 		if err != nil {
 			return err
 		}
@@ -404,7 +412,7 @@ func (fc *firecracker) createDiskPool() error {
 			DriveID:      &driveID,
 			IsReadOnly:   &isReadOnly,
 			IsRootDevice: &isRootDevice,
-			PathOnHost:   &hostPath,
+			PathOnHost:   &u.Path,
 		}
 		driveParams.SetBody(drive)
 		_, err = fc.client().Operations.PutGuestDriveByID(driveParams)
@@ -34,7 +34,6 @@ func TestFilesystemCreateAllResourcesSuccessful(t *testing.T) {
 	sandbox := &Sandbox{
 		ctx:        context.Background(),
 		id:         testSandboxID,
-		storage:    fs,
 		config:     sandboxConfig,
 		devManager: manager.NewDeviceManager(manager.VirtioBlock, nil),
 		containers: map[string]*Container{},
@@ -23,6 +23,7 @@ import (
 	"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
 	ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -46,8 +47,8 @@ type HyperConfig struct {
 
 func (h *hyper) generateSockets(sandbox *Sandbox, c HyperConfig) {
 	sandboxSocketPaths := []string{
-		fmt.Sprintf(defaultSockPathTemplates[0], runStoragePath, sandbox.id),
-		fmt.Sprintf(defaultSockPathTemplates[1], runStoragePath, sandbox.id),
+		fmt.Sprintf(defaultSockPathTemplates[0], store.RunStoragePath, sandbox.id),
+		fmt.Sprintf(defaultSockPathTemplates[1], store.RunStoragePath, sandbox.id),
 	}
 
 	if c.SockCtlName != "" {
@@ -289,7 +290,7 @@ func (h *hyper) init(ctx context.Context, sandbox *Sandbox, config interface{})
 	}
 
 	// Fetch agent runtime info.
-	if err := sandbox.storage.fetchAgentState(sandbox.id, &h.state); err != nil {
+	if err := sandbox.store.Load(store.Agent, &h.state); err != nil {
 		h.Logger().Debug("Could not retrieve anything from storage")
 	}
 
@@ -297,7 +298,7 @@ func (h *hyper) init(ctx context.Context, sandbox *Sandbox, config interface{})
 }
 
 func (h *hyper) getVMPath(id string) string {
-	return filepath.Join(runStoragePath, id)
+	return store.SandboxRuntimeRootPath(id)
 }
 
 func (h *hyper) getSharePath(id string) string {
@@ -319,7 +320,7 @@ func (h *hyper) configure(hv hypervisor, id, sharePath string, builtin bool, con
 		HostPath: sharePath,
 	}
 
-	if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil {
+	if err := os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil {
 		return err
 	}
 
@@ -487,7 +488,7 @@ func (h *hyper) stopSandbox(sandbox *Sandbox) error {
 
 	h.state.ProxyPid = -1
 	h.state.URL = ""
-	if err := sandbox.storage.storeAgentState(sandbox.id, h.state); err != nil {
+	if err := sandbox.store.Store(store.Agent, h.state); err != nil {
 		// ignore error
 		h.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state")
 	}
@@ -990,7 +991,7 @@ func (h *hyper) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) err
 	h.state.ProxyPid = pid
 	h.state.URL = url
 	if sandbox != nil {
-		if err := sandbox.storage.storeAgentState(sandbox.id, h.state); err != nil {
+		if err := sandbox.store.Store(store.Agent, h.state); err != nil {
 			return err
 		}
 	}
@@ -14,6 +14,7 @@ import (
 	"testing"
 
 	"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/stretchr/testify/assert"
 	"github.com/vishvananda/netlink"
@@ -73,13 +74,13 @@ func TestHyperstartGenerateSocketsSuccessfulNoPathProvided(t *testing.T) {
 		{
 			DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 0),
 			ID:       fmt.Sprintf(defaultIDTemplate, 0),
-			HostPath: fmt.Sprintf(defaultSockPathTemplates[0], runStoragePath, sandbox.id),
+			HostPath: fmt.Sprintf(defaultSockPathTemplates[0], store.RunStoragePath, sandbox.id),
 			Name:     fmt.Sprintf(defaultChannelTemplate, 0),
 		},
 		{
 			DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 1),
 			ID:       fmt.Sprintf(defaultIDTemplate, 1),
-			HostPath: fmt.Sprintf(defaultSockPathTemplates[1], runStoragePath, sandbox.id),
+			HostPath: fmt.Sprintf(defaultSockPathTemplates[1], store.RunStoragePath, sandbox.id),
 			Name:     fmt.Sprintf(defaultChannelTemplate, 1),
 		},
 	}
@@ -247,13 +248,15 @@ func TestHyperSetProxy(t *testing.T) {
 	h := &hyper{}
 	p := &ccProxy{}
 	s := &Sandbox{
-		storage: &filesystem{ctx: context.Background()},
+		ctx: context.Background(),
 	}
 
-	err := h.setProxy(s, p, 0, "")
-	assert.Error(err)
+	vcStore, err := store.NewVCSandboxStore(s.ctx, "foobar")
+	assert.Nil(err)
 
-	err = h.setProxy(s, p, 0, "foobar")
+	s.store = vcStore
+
+	err = h.setProxy(s, p, 0, "")
 	assert.Error(err)
 }
@@ -15,6 +15,7 @@ import (
 	"strings"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 )
 
@@ -591,7 +592,7 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
 // hypervisor is the virtcontainers hypervisor interface.
 // The default hypervisor implementation is Qemu.
 type hypervisor interface {
-	createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error
+	createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error
 	startSandbox(timeout int) error
 	stopSandbox() error
 	pauseSandbox() error
@@ -27,6 +27,7 @@ import (
 	ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
 	opentracing "github.com/opentracing/opentracing-go"
@@ -131,7 +132,7 @@ func (k *kataAgent) Logger() *logrus.Entry {
 }
 
 func (k *kataAgent) getVMPath(id string) string {
-	return filepath.Join(RunVMStoragePath, id)
+	return filepath.Join(store.RunVMStoragePath, id)
 }
 
 func (k *kataAgent) getSharePath(id string) string {
@@ -193,7 +194,7 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface
 	k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType)
 
 	// Fetch agent runtime info.
-	if err := sandbox.storage.fetchAgentState(sandbox.id, &k.state); err != nil {
+	if err := sandbox.store.Load(store.Agent, &k.state); err != nil {
 		k.Logger().Debug("Could not retrieve anything from storage")
 	}
 
@@ -272,7 +273,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
 		HostPath: sharePath,
 	}
 
-	if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil {
+	if err := os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil {
 		return err
 	}
 
@@ -578,7 +579,7 @@ func (k *kataAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string)
 	k.state.ProxyPid = pid
 	k.state.URL = url
 	if sandbox != nil {
-		if err := sandbox.storage.storeAgentState(sandbox.id, k.state); err != nil {
+		if err := sandbox.store.Store(store.Agent, k.state); err != nil {
 			return err
 		}
 	}
@@ -696,7 +697,7 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
 	// clean up agent state
 	k.state.ProxyPid = -1
 	k.state.URL = ""
-	if err := sandbox.storage.storeAgentState(sandbox.id, k.state); err != nil {
+	if err := sandbox.store.Store(store.Agent, k.state); err != nil {
 		// ignore error
 		k.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state")
 	}
@@ -1799,7 +1800,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
 
 	cpReq := &grpc.CopyFileRequest{
 		Path:     dst,
-		DirMode:  uint32(dirMode),
+		DirMode:  uint32(store.DirMode),
 		FileMode: st.Mode,
 		FileSize: fileSize,
 		Uid:      int32(st.Uid),
@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -20,7 +21,6 @@ import (
 	gpb "github.com/gogo/protobuf/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/stretchr/testify/assert"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 
 	aTypes "github.com/kata-containers/agent/pkg/types"
@@ -32,6 +32,7 @@ import (
 	vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 )
 
@@ -736,9 +737,13 @@ func TestAgentCreateContainer(t *testing.T) {
 			},
 		},
 		hypervisor: &mockHypervisor{},
-		storage:    &filesystem{},
 	}
 
+	vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
+	assert.Nil(err)
+
+	sandbox.store = vcStore
+
 	container := &Container{
 		ctx: sandbox.ctx,
 		id:  "barfoo",
@@ -839,15 +844,15 @@ func TestKataAgentSetProxy(t *testing.T) {
 	p := &kataBuiltInProxy{}
 	s := &Sandbox{
 		ctx: context.Background(),
-		storage: &filesystem{
-			ctx: context.Background(),
-		},
+		id:  "foobar",
 	}
 
-	err := k.setProxy(s, p, 0, "")
-	assert.Error(err)
+	vcStore, err := store.NewVCSandboxStore(s.ctx, s.id)
+	assert.Nil(err)
 
-	err = k.setProxy(s, p, 0, "foobar")
+	s.store = vcStore
+
+	err = k.setProxy(s, p, 0, "")
 	assert.Error(err)
 }
@@ -9,6 +9,7 @@ import (
 	"context"
 	"os"
 
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 )
 
@@ -23,7 +24,7 @@ func (m *mockHypervisor) hypervisorConfig() HypervisorConfig {
 	return HypervisorConfig{}
 }
 
-func (m *mockHypervisor) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
+func (m *mockHypervisor) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error {
 	err := hypervisorConfig.valid()
 	if err != nil {
 		return err
@@ -23,13 +23,12 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
 				HypervisorPath: "",
 			},
 		},
-		storage: &filesystem{},
 	}
 
 	ctx := context.Background()
 
 	// wrong config
-	if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err == nil {
+	if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, nil); err == nil {
 		t.Fatal()
 	}
 
@@ -39,7 +38,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
 		HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
 	}
 
-	if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
+	if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, nil); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -15,6 +15,8 @@ import (
 )
 
 func testCreateNoopContainer() (*Sandbox, *Container, error) {
+	cleanUp()
+
 	contID := "100"
 	config := newTestSandboxConfigNoop()
 
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"path/filepath"
 
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/sirupsen/logrus"
 )
 
@@ -146,7 +147,7 @@ func validateProxyConfig(proxyConfig ProxyConfig) error {
 func defaultProxyURL(id, socketType string) (string, error) {
 	switch socketType {
 	case SocketTypeUNIX:
-		socketPath := filepath.Join(runStoragePath, id, "proxy.sock")
+		socketPath := filepath.Join(store.SandboxRuntimeRootPath(id), "proxy.sock")
 		return fmt.Sprintf("unix://%s", socketPath), nil
 	case SocketTypeVSOCK:
 		// TODO Build the VSOCK default URL
@@ -13,6 +13,7 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 )
@@ -222,7 +223,7 @@ func testDefaultProxyURL(expectedURL string, socketType string, sandboxID string
 }
 
 func TestDefaultProxyURLUnix(t *testing.T) {
-	path := filepath.Join(runStoragePath, sandboxID, "proxy.sock")
+	path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock")
 	socketPath := fmt.Sprintf("unix://%s", path)
 
 	if err := testDefaultProxyURL(socketPath, SocketTypeUNIX, sandboxID); err != nil {
@@ -237,7 +238,7 @@ func TestDefaultProxyURLVSock(t *testing.T) {
 }
 
 func TestDefaultProxyURLUnknown(t *testing.T) {
-	path := filepath.Join(runStoragePath, sandboxID, "proxy.sock")
+	path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock")
 	socketPath := fmt.Sprintf("unix://%s", path)
 
 	if err := testDefaultProxyURL(socketPath, "foobar", sandboxID); err == nil {
@@ -261,7 +262,7 @@ func testProxyStart(t *testing.T, agent agent, proxy proxy) {
 	}
 
 	invalidPath := filepath.Join(tmpdir, "enoent")
-	expectedSocketPath := filepath.Join(runStoragePath, testSandboxID, "proxy.sock")
+	expectedSocketPath := filepath.Join(store.SandboxRuntimeRootPath(testSandboxID), "proxy.sock")
 	expectedURI := fmt.Sprintf("unix://%s", expectedSocketPath)
 
 	data := []testData{
@ -23,6 +23,7 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/kata-containers/runtime/virtcontainers/device/config"
|
||||
"github.com/kata-containers/runtime/virtcontainers/store"
|
||||
"github.com/kata-containers/runtime/virtcontainers/types"
|
||||
"github.com/kata-containers/runtime/virtcontainers/utils"
|
||||
"golang.org/x/sys/unix"
|
||||
@ -60,7 +61,7 @@ type QemuState struct {
|
||||
type qemu struct {
|
||||
id string
|
||||
|
||||
storage resourceStorage
|
||||
store *store.VCStore
|
||||
|
||||
config HypervisorConfig
|
||||
|
||||
@ -210,7 +211,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) {
|
||||
}
|
||||
|
||||
// setup sets the Qemu structure up.
|
||||
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
|
||||
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
|
||||
span, _ := q.trace("setup")
|
||||
defer span.Finish()
|
||||
|
||||
@ -220,7 +221,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso
|
||||
}
|
||||
|
||||
q.id = id
|
||||
q.storage = storage
|
||||
q.store = vcStore
|
||||
q.config = *hypervisorConfig
|
||||
q.arch = newQemuArch(q.config)
|
||||
|
||||
@ -238,7 +239,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso
|
||||
q.nvdimmCount = 0
|
||||
}
|
||||
|
||||
if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil {
|
||||
if err = q.store.Load(store.Hypervisor, &q.state); err != nil {
|
||||
q.Logger().Debug("Creating bridges")
|
||||
q.state.Bridges = q.arch.bridges(q.config.DefaultBridges)
|
||||
|
||||
@ -249,11 +250,11 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso
|
||||
|
||||
// The path might already exist, but in case of VM templating,
|
||||
// we have to create it since the sandbox has not created it yet.
|
||||
if err = os.MkdirAll(filepath.Join(runStoragePath, id), dirMode); err != nil {
|
||||
if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = q.storage.storeHypervisorState(q.id, q.state); err != nil {
|
||||
if err = q.store.Store(store.Hypervisor, q.state); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -308,7 +309,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
|
||||
}
|
||||
|
||||
func (q *qemu) qmpSocketPath(id string) (string, error) {
|
||||
return utils.BuildSocketPath(RunVMStoragePath, id, qmpSocket)
|
||||
return utils.BuildSocketPath(store.RunVMStoragePath, id, qmpSocket)
|
||||
}
|
||||
|
||||
func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
|
||||
@ -416,14 +417,14 @@ func (q *qemu) setupTemplate(knobs *govmmQemu.Knobs, memory *govmmQemu.Memory) g
|
||||
}
|
||||
|
||||
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
|
||||
func (q *qemu) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
|
||||
func (q *qemu) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error {
|
||||
// Save the tracing context
|
||||
q.ctx = ctx
|
||||
|
||||
span, _ := q.trace("createSandbox")
|
||||
defer span.Finish()
|
||||
|
||||
if err := q.setup(id, hypervisorConfig, storage); err != nil {
|
||||
if err := q.setup(id, hypervisorConfig, store); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -560,8 +561,8 @@ func (q *qemu) startSandbox(timeout int) error {
|
||||
q.fds = []*os.File{}
|
||||
}()
|
||||
|
||||
vmPath := filepath.Join(RunVMStoragePath, q.id)
|
||||
err := os.MkdirAll(vmPath, dirMode)
|
||||
vmPath := filepath.Join(store.RunVMStoragePath, q.id)
|
||||
err := os.MkdirAll(vmPath, store.DirMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -655,7 +656,7 @@ func (q *qemu) stopSandbox() error {
|
||||
}
|
||||
|
||||
// cleanup vm path
|
||||
dir := filepath.Join(RunVMStoragePath, q.id)
|
||||
dir := filepath.Join(store.RunVMStoragePath, q.id)
|
||||
|
||||
// If it's a symlink, remove both dir and the target.
|
||||
// This can happen when vm template links a sandbox to a vm.
|
||||
@ -1020,7 +1021,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
|
||||
return data, err
|
||||
}
|
||||
|
||||
return data, q.storage.storeHypervisorState(q.id, q.state)
|
||||
return data, q.store.Store(store.Hypervisor, q.state)
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
@ -1032,7 +1033,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
|
||||
return data, err
|
||||
}
|
||||
|
||||
return data, q.storage.storeHypervisorState(q.id, q.state)
|
||||
return data, q.store.Store(store.Hypervisor, q.state)
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
|
||||
@ -1112,12 +1113,12 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
|
||||
hotpluggedVCPUs++
|
||||
if hotpluggedVCPUs == amount {
|
||||
// All vCPUs were hotplugged
|
||||
return amount, q.storage.storeHypervisorState(q.id, q.state)
|
||||
return amount, q.store.Store(store.Hypervisor, q.state)
|
||||
}
|
||||
}
|
||||
|
||||
// All vCPUs were NOT hotplugged
|
||||
if err := q.storage.storeHypervisorState(q.id, q.state); err != nil {
|
||||
if err := q.store.Store(store.Hypervisor, q.state); err != nil {
|
||||
q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err)
|
||||
}
|
||||
|
||||
@ -1137,7 +1138,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
|
||||
// get the last vCPUs and try to remove it
|
||||
cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1]
|
||||
if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil {
|
||||
_ = q.storage.storeHypervisorState(q.id, q.state)
|
||||
_ = q.store.Store(store.Hypervisor, q.state)
|
||||
return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err)
|
||||
}
|
||||
|
||||
@ -1145,7 +1146,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
|
||||
q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1]
|
||||
}
|
||||
|
||||
return amount, q.storage.storeHypervisorState(q.id, q.state)
|
||||
return amount, q.store.Store(store.Hypervisor, q.state)
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) {
|
||||
@ -1217,7 +1218,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
|
||||
}
|
||||
|
||||
q.state.HotpluggedMemory += memDev.sizeMB
|
||||
return memDev.sizeMB, q.storage.storeHypervisorState(q.id, q.state)
|
||||
return memDev.sizeMB, q.store.Store(store.Hypervisor, q.state)
|
||||
}
|
||||
|
||||
func (q *qemu) pauseSandbox() error {
|
||||
@ -1269,7 +1270,7 @@ func (q *qemu) getSandboxConsole(id string) (string, error) {
|
||||
span, _ := q.trace("getSandboxConsole")
|
||||
defer span.Finish()
|
||||
|
||||
return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket)
|
||||
return utils.BuildSocketPath(store.RunVMStoragePath, id, consoleSocket)
|
||||
}
|
||||
|
||||
func (q *qemu) saveSandbox() error {
|
||||
|
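Every qemu change above follows the same substitution: the per-resource helpers on resourceStorage (fetchHypervisorState, storeHypervisorState) become typed Load/Store calls on a *store.VCStore, keyed by an item constant such as store.Hypervisor. A minimal sketch of that round-trip, using only names that appear in this diff; the helper function itself is illustrative, not part of the commit:

package example

import (
    "context"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

// QemuState stands in here for the hypervisor state struct persisted above.
type QemuState struct {
    UUID string
}

// persistAndReload saves the state under the sandbox's store and reads it
// back, mirroring what qemu.setup() and the hotplug paths now do.
func persistAndReload(ctx context.Context, sandboxID string, state QemuState) (QemuState, error) {
    s, err := store.NewVCSandboxStore(ctx, sandboxID)
    if err != nil {
        return QemuState{}, err
    }

    // Store serializes the item under the sandbox store root.
    if err := s.Store(store.Hypervisor, state); err != nil {
        return QemuState{}, err
    }

    // Load deserializes it back through a pointer, as setup() does with q.state.
    var loaded QemuState
    if err := s.Load(store.Hypervisor, &loaded); err != nil {
        return QemuState{}, err
    }
    return loaded, nil
}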
@@ -16,6 +16,7 @@ import (
    "github.com/stretchr/testify/assert"

    "github.com/kata-containers/runtime/virtcontainers/device/config"
    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/kata-containers/runtime/virtcontainers/types"
)

@@ -226,7 +227,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) {
    assert := assert.New(t)
    qemuArchBase := newQemuArchBase()

    path := filepath.Join(runStoragePath, sandboxID, consoleSocket)
    path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket)

    expectedOut := []govmmQemu.Device{
        govmmQemu.SerialDevice{
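Hand-assembled paths such as filepath.Join(runStoragePath, sandboxID, ...) are traded for store helpers like store.SandboxRuntimeRootPath, so the on-disk layout is owned by a single package. A sketch of the equivalence the updated test expectation relies on, assuming the helper simply roots the sandbox ID under store.RunStoragePath:

package example

import (
    "path/filepath"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

// consolePaths returns the old hand-built path and the new helper-built
// path; under the assumption above they are identical strings.
func consolePaths(sandboxID, consoleSocket string) (oldStyle, newStyle string) {
    oldStyle = filepath.Join(store.RunStoragePath, sandboxID, consoleSocket)
    newStyle = filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket)
    return oldStyle, newStyle
}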
@@ -15,6 +15,7 @@ import (
    "testing"

    govmmQemu "github.com/intel/govmm/qemu"
    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/kata-containers/runtime/virtcontainers/types"
    "github.com/stretchr/testify/assert"
)
@@ -75,27 +76,33 @@ func TestQemuCreateSandbox(t *testing.T) {
    q := &qemu{}

    sandbox := &Sandbox{
        id: "testSandbox",
        storage: &filesystem{},
        ctx: context.Background(),
        id: "testSandbox",
        config: &SandboxConfig{
            HypervisorConfig: qemuConfig,
        },
    }

    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    if err != nil {
        t.Fatal(err)
    }
    sandbox.store = vcStore

    // Create the hypervisor fake binary
    testQemuPath := filepath.Join(testDir, testHypervisor)
    _, err := os.Create(testQemuPath)
    _, err = os.Create(testQemuPath)
    if err != nil {
        t.Fatalf("Could not create hypervisor file %s: %v", testQemuPath, err)
    }

    // Create parent dir path for hypervisor.json
    parentDir := filepath.Join(runStoragePath, sandbox.id)
    if err := os.MkdirAll(parentDir, dirMode); err != nil {
    parentDir := store.SandboxConfigurationRootPath(sandbox.id)
    if err := os.MkdirAll(parentDir, store.DirMode); err != nil {
        t.Fatalf("Could not create parent directory %s: %v", parentDir, err)
    }

    if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
    if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.store); err != nil {
        t.Fatal(err)
    }

@@ -113,27 +120,33 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
    q := &qemu{}

    sandbox := &Sandbox{
        id: "testSandbox",
        storage: &filesystem{},
        ctx: context.Background(),
        id: "testSandbox",
        config: &SandboxConfig{
            HypervisorConfig: qemuConfig,
        },
    }

    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    if err != nil {
        t.Fatal(err)
    }
    sandbox.store = vcStore

    // Create the hypervisor fake binary
    testQemuPath := filepath.Join(testDir, testHypervisor)
    _, err := os.Create(testQemuPath)
    _, err = os.Create(testQemuPath)
    if err != nil {
        t.Fatalf("Could not create hypervisor file %s: %v", testQemuPath, err)
    }

    // Ensure parent dir path for hypervisor.json does not exist.
    parentDir := filepath.Join(runStoragePath, sandbox.id)
    parentDir := store.SandboxConfigurationRootPath(sandbox.id)
    if err := os.RemoveAll(parentDir); err != nil {
        t.Fatal(err)
    }

    if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil {
    if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.store); err != nil {
        t.Fatalf("Qemu createSandbox() is not expected to fail because of missing parent directory for storage: %v", err)
    }
}
@@ -291,7 +304,7 @@ func TestQemuGetSandboxConsole(t *testing.T) {
        ctx: context.Background(),
    }
    sandboxID := "testSandboxID"
    expected := filepath.Join(RunVMStoragePath, sandboxID, consoleSocket)
    expected := filepath.Join(store.RunVMStoragePath, sandboxID, consoleSocket)

    result, err := q.getSandboxConsole(sandboxID)
    if err != nil {
@@ -367,14 +380,19 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
    assert := assert.New(t)

    qemuConfig := newQemuConfig()
    fs := &filesystem{}
    q := &qemu{
        ctx: context.Background(),
        config: qemuConfig,
        storage: fs,
        ctx: context.Background(),
        id: "qemuTest",
        config: qemuConfig,
    }

    _, err := q.hotplugAddDevice(&memoryDevice{0, 128}, fsDev)
    vcStore, err := store.NewVCSandboxStore(q.ctx, q.id)
    if err != nil {
        t.Fatal(err)
    }
    q.store = vcStore

    _, err = q.hotplugAddDevice(&memoryDevice{0, 128}, fsDev)
    assert.Error(err)
    _, err = q.hotplugRemoveDevice(&memoryDevice{0, 128}, fsDev)
    assert.Error(err)
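A detail worth noting in these test hunks: several `_, err := os.Create(...)` lines become `_, err = os.Create(...)`. The new store setup block already declares err with `:=`, and a later short variable declaration whose only "new" variable is the blank identifier fails to compile ("no new variables on left side of :="), so the tests switch to plain assignment. In miniature, and only as an illustration of the pattern:

package example

import (
    "context"
    "os"
    "testing"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

func setupStoreThenCreate(t *testing.T, sandboxID, path string) {
    // ':=' declares both vcStore and err.
    vcStore, err := store.NewVCSandboxStore(context.Background(), sandboxID)
    if err != nil {
        t.Fatal(err)
    }
    _ = vcStore

    // err already exists in this scope, so later calls assign with '='.
    _, err = os.Create(path)
    if err != nil {
        t.Fatal(err)
    }
}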
@@ -11,7 +11,6 @@ import (
    "io"
    "net"
    "os"
    "path/filepath"
    "sync"
    "syscall"

@@ -26,6 +25,7 @@ import (
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    deviceManager "github.com/kata-containers/runtime/virtcontainers/device/manager"
    vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/kata-containers/runtime/virtcontainers/types"
    "github.com/kata-containers/runtime/virtcontainers/utils"
    "github.com/vishvananda/netlink"
@@ -137,64 +137,6 @@ func (sandboxConfig *SandboxConfig) valid() bool {
    return true
}

const (
    // R/W lock
    exclusiveLock = syscall.LOCK_EX

    // Read only lock
    sharedLock = syscall.LOCK_SH
)

// rLockSandbox locks the sandbox with a shared lock.
func rLockSandbox(sandboxID string) (*os.File, error) {
    return lockSandbox(sandboxID, sharedLock)
}

// rwLockSandbox locks the sandbox with an exclusive lock.
func rwLockSandbox(sandboxID string) (*os.File, error) {
    return lockSandbox(sandboxID, exclusiveLock)
}

// lock locks any sandbox to prevent it from being accessed by other processes.
func lockSandbox(sandboxID string, lockType int) (*os.File, error) {
    if sandboxID == "" {
        return nil, errNeedSandboxID
    }

    fs := filesystem{}
    sandboxlockFile, _, err := fs.sandboxURI(sandboxID, lockFileType)
    if err != nil {
        return nil, err
    }

    lockFile, err := os.Open(sandboxlockFile)
    if err != nil {
        return nil, err
    }

    if err := syscall.Flock(int(lockFile.Fd()), lockType); err != nil {
        return nil, err
    }

    return lockFile, nil
}

// unlock unlocks any sandbox to allow it being accessed by other processes.
func unlockSandbox(lockFile *os.File) error {
    if lockFile == nil {
        return fmt.Errorf("lockFile cannot be empty")
    }

    err := syscall.Flock(int(lockFile.Fd()), syscall.LOCK_UN)
    if err != nil {
        return err
    }

    lockFile.Close()

    return nil
}

// Sandbox is composed of a set of containers and a runtime environment.
// A Sandbox can be created, deleted, started, paused, stopped, listed, entered, and restored.
type Sandbox struct {
@@ -204,7 +146,7 @@ type Sandbox struct {
    factory Factory
    hypervisor hypervisor
    agent agent
    storage resourceStorage
    store *store.VCStore
    network Network
    monitor *monitor

@@ -269,12 +211,7 @@ func (s *Sandbox) SetAnnotations(annotations map[string]string) error {
        s.config.Annotations[k] = v
    }

    err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config))
    if err != nil {
        return err
    }

    return nil
    return s.store.Store(store.Configuration, *(s.config))
}

// GetAnnotations returns sandbox's annotations
@@ -476,7 +413,7 @@ func (s *Sandbox) getAndStoreGuestDetails() error {
        s.seccompSupported = guestDetailRes.AgentDetails.SupportsSeccomp
    }

    if err = s.storage.storeSandboxResource(s.id, stateFileType, s.state); err != nil {
    if err = s.store.Store(store.State, s.state); err != nil {
        return err
    }
}
@@ -503,14 +440,14 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
    }

    // Fetch sandbox network to be able to access it from the sandbox structure.
    networkNS, err := s.storage.fetchSandboxNetwork(s.id)
    if err == nil {
    var networkNS NetworkNamespace
    if err := s.store.Load(store.Network, &networkNS); err == nil {
        s.networkNS = networkNS
    }

    devices, err := s.storage.fetchSandboxDevices(s.id)
    devices, err := s.store.LoadDevices()
    if err != nil {
        s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("fetch sandbox device failed")
        s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed")
    }
    s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices)

@@ -518,7 +455,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
    // If it exists, this means this is a re-creation, i.e.
    // we don't need to talk to the guest's agent, but only
    // want to create the sandbox and its containers in memory.
    state, err := s.storage.fetchSandboxState(s.id)
    state, err := s.store.LoadState()
    if err == nil && state.State != "" {
        s.state = state
        return s, nil
@@ -557,12 +494,11 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
        factory: factory,
        hypervisor: hypervisor,
        agent: agent,
        storage: &filesystem{},
        config: &sandboxConfig,
        volumes: sandboxConfig.Volumes,
        containers: map[string]*Container{},
        runPath: filepath.Join(runStoragePath, sandboxConfig.ID),
        configPath: filepath.Join(configStoragePath, sandboxConfig.ID),
        runPath: store.SandboxRuntimeRootPath(sandboxConfig.ID),
        configPath: store.SandboxConfigurationRootPath(sandboxConfig.ID),
        state: types.State{},
        annotationsLock: &sync.RWMutex{},
        wg: &sync.WaitGroup{},
@@ -572,6 +508,13 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
        ctx: ctx,
    }

    vcStore, err := store.NewVCSandboxStore(ctx, s.id)
    if err != nil {
        return nil, err
    }

    s.store = vcStore

    if err = globalSandboxList.addSandbox(s); err != nil {
        return nil, err
    }
@@ -583,17 +526,13 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
        }
    }()

    if err = s.storage.createAllResources(ctx, s); err != nil {
        return nil, err
    }

    defer func() {
        if err != nil {
            s.storage.deleteSandboxResources(s.id, nil)
            s.store.Delete()
        }
    }()

    if err = s.hypervisor.createSandbox(ctx, s.id, &sandboxConfig.HypervisorConfig, s.storage); err != nil {
    if err = s.hypervisor.createSandbox(ctx, s.id, &sandboxConfig.HypervisorConfig, s.store); err != nil {
        return nil, err
    }

@@ -611,7 +550,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
}

func (s *Sandbox) storeSandboxDevices() error {
    return s.storage.storeSandboxDevices(s.id, s.devManager.GetAllDevices())
    return s.store.StoreDevices(s.devManager.GetAllDevices())
}

// storeSandbox stores a sandbox config.
@@ -619,13 +558,13 @@ func (s *Sandbox) storeSandbox() error {
    span, _ := s.trace("storeSandbox")
    defer span.Finish()

    err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config))
    err := s.store.Store(store.Configuration, *(s.config))
    if err != nil {
        return err
    }

    for id, container := range s.containers {
        err = s.storage.storeContainerResource(s.id, id, configFileType, *(container.config))
    for _, container := range s.containers {
        err = container.store.Store(store.Configuration, *(container.config))
        if err != nil {
            return err
        }
@@ -634,6 +573,40 @@ func (s *Sandbox) storeSandbox() error {
    return nil
}

func rLockSandbox(sandboxID string) (string, error) {
    store, err := store.NewVCSandboxStore(context.Background(), sandboxID)
    if err != nil {
        return "", err
    }

    return store.RLock()
}

func rwLockSandbox(sandboxID string) (string, error) {
    store, err := store.NewVCSandboxStore(context.Background(), sandboxID)
    if err != nil {
        return "", err
    }

    return store.Lock()
}

func unlockSandbox(sandboxID, token string) error {
    // If the store no longer exists, we won't be able to unlock.
    // Creating a new store for locking an item that does not even exist
    // does not make sense.
    if !store.VCSandboxStoreExists(context.Background(), sandboxID) {
        return nil
    }

    store, err := store.NewVCSandboxStore(context.Background(), sandboxID)
    if err != nil {
        return err
    }

    return store.Unlock(token)
}

// fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) {
    virtLog.Info("fetch sandbox")
@@ -646,12 +619,17 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
        return sandbox, err
    }

    fs := filesystem{}
    config, err := fs.fetchSandboxConfig(sandboxID)
    // We're bootstrapping
    vcStore, err := store.NewVCSandboxStore(context.Background(), sandboxID)
    if err != nil {
        return nil, err
    }

    var config SandboxConfig
    if err := vcStore.Load(store.Configuration, &config); err != nil {
        return nil, err
    }

    // fetchSandbox is not suppose to create new sandbox VM.
    sandbox, err = createSandbox(ctx, config, nil)
    if err != nil {
@@ -742,7 +720,7 @@ func (s *Sandbox) Delete() error {

    s.agent.cleanup(s.id)

    return s.storage.deleteSandboxResources(s.id, nil)
    return s.store.Delete()
}

func (s *Sandbox) startNetworkMonitor() error {
@@ -812,7 +790,7 @@ func (s *Sandbox) createNetwork() error {
    }

    // Store the network
    return s.storage.storeSandboxNetwork(s.id, s.networkNS)
    return s.store.Store(store.Network, s.networkNS)
}

func (s *Sandbox) removeNetwork() error {
@@ -880,7 +858,7 @@ func (s *Sandbox) AddInterface(inf *vcTypes.Interface) (*vcTypes.Interface, erro

    // Update the sandbox storage
    s.networkNS.Endpoints = append(s.networkNS.Endpoints, endpoint)
    if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil {
    if err := s.store.Store(store.Network, s.networkNS); err != nil {
        return nil, err
    }

@@ -898,7 +876,7 @@ func (s *Sandbox) RemoveInterface(inf *vcTypes.Interface) (*vcTypes.Interface, e
        return inf, err
    }
    s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...)
    if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil {
    if err := s.store.Store(store.Network, s.networkNS); err != nil {
        return inf, err
    }
    break
@@ -969,7 +947,7 @@ func (s *Sandbox) startVM() error {
            return err
        }
    }
    if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil {
    if err := s.store.Store(store.Network, s.networkNS); err != nil {
        return err
    }
}
@@ -1066,8 +1044,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
        return nil, err
    }

    err = s.storage.storeSandboxResource(s.id, configFileType, *(s.config))
    if err != nil {
    if err := s.store.Store(store.Configuration, *(s.config)); err != nil {
        return nil, err
    }

@@ -1151,8 +1128,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
    }

    // Store sandbox config
    err = s.storage.storeSandboxResource(s.id, configFileType, *(s.config))
    if err != nil {
    if err := s.store.Store(store.Configuration, *(s.config)); err != nil {
        return nil, err
    }

@@ -1398,7 +1374,7 @@ func (s *Sandbox) setSandboxState(state types.StateString) error {
    s.state.State = state

    // update on-disk state
    return s.storage.storeSandboxResource(s.id, stateFileType, s.state)
    return s.store.Store(store.State, s.state)
}

func (s *Sandbox) pauseSetStates() error {
@@ -1431,8 +1407,7 @@ func (s *Sandbox) getAndSetSandboxBlockIndex() (int, error) {
    s.state.BlockIndex++

    // update on-disk state
    err := s.storage.storeSandboxResource(s.id, stateFileType, s.state)
    if err != nil {
    if err := s.store.Store(store.State, s.state); err != nil {
        return -1, err
    }

@@ -1445,8 +1420,7 @@ func (s *Sandbox) decrementSandboxBlockIndex() error {
    s.state.BlockIndex--

    // update on-disk state
    err := s.storage.storeSandboxResource(s.id, stateFileType, s.state)
    if err != nil {
    if err := s.store.Store(store.State, s.state); err != nil {
        return err
    }

@@ -1459,7 +1433,7 @@ func (s *Sandbox) setSandboxPid(pid int) error {
    s.state.Pid = pid

    // update on-disk state
    return s.storage.storeSandboxResource(s.id, stateFileType, s.state)
    return s.store.Store(store.State, s.state)
}

func (s *Sandbox) setContainersState(state types.StateString) error {
@@ -1476,32 +1450,7 @@ func (s *Sandbox) setContainersState(state types.StateString) error {
    return nil
}

func (s *Sandbox) deleteContainerState(containerID string) error {
    if containerID == "" {
        return errNeedContainerID
    }

    err := s.storage.deleteContainerResources(s.id, containerID, []sandboxResource{stateFileType})
    if err != nil {
        return err
    }

    return nil
}

func (s *Sandbox) deleteContainersState() error {
    for _, container := range s.config.Containers {
        err := s.deleteContainerState(container.ID)
        if err != nil {
            return err
        }
    }

    return nil
}

// togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes
// it.
// togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes it.
func togglePauseSandbox(ctx context.Context, sandboxID string, pause bool) (*Sandbox, error) {
    span, ctx := trace(ctx, "togglePauseSandbox")
    defer span.Finish()
@@ -1514,7 +1463,7 @@ func togglePauseSandbox(ctx context.Context, sandboxID string, pause bool) (*San
    if err != nil {
        return nil, err
    }
    defer unlockSandbox(lockFile)
    defer unlockSandbox(sandboxID, lockFile)

    // Fetch the sandbox from storage and create it.
    s, err := fetchSandbox(ctx, sandboxID)
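The locking model changes shape here as well: rLockSandbox and rwLockSandbox no longer return an *os.File holding a flock, they return an opaque token string, and unlockSandbox needs both the sandbox ID and that token, which is why every defer unlockSandbox(lockFile) call site gained a sandboxID argument. A sketch of the new call sequence, using only the functions defined in this hunk; the wrapper itself is illustrative:

// withSandboxLocked mirrors what DeleteSandbox, StartSandbox and the
// other API entry points do around fetchSandbox.
func withSandboxLocked(sandboxID string, fn func() error) error {
    // Exclusive lock; a token string comes back instead of a file handle.
    token, err := rwLockSandbox(sandboxID)
    if err != nil {
        return err
    }
    // unlockSandbox is a no-op when the sandbox store has already been
    // deleted, so deferring it is safe even across Delete().
    defer unlockSandbox(sandboxID, token)

    return fn()
}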
@@ -24,6 +24,7 @@ import (
    "github.com/kata-containers/runtime/virtcontainers/device/drivers"
    "github.com/kata-containers/runtime/virtcontainers/device/manager"
    "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/kata-containers/runtime/virtcontainers/types"
    "golang.org/x/sys/unix"
)
@@ -179,92 +180,6 @@ func TestSandboxStatePausedReady(t *testing.T) {
    }
}

func testSandboxDir(t *testing.T, resource sandboxResource, expected string) error {
    fs := filesystem{}
    _, dir, err := fs.sandboxURI(testSandboxID, resource)
    if err != nil {
        return err
    }

    if dir != expected {
        return fmt.Errorf("Unexpected sandbox directory %s vs %s", dir, expected)
    }

    return nil
}

func testSandboxFile(t *testing.T, resource sandboxResource, expected string) error {
    fs := filesystem{}
    file, _, err := fs.sandboxURI(testSandboxID, resource)
    if err != nil {
        return err
    }

    if file != expected {
        return fmt.Errorf("Unexpected sandbox file %s vs %s", file, expected)
    }

    return nil
}

func TestSandboxDirConfig(t *testing.T) {
    err := testSandboxDir(t, configFileType, sandboxDirConfig)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxDirState(t *testing.T) {
    err := testSandboxDir(t, stateFileType, sandboxDirState)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxDirLock(t *testing.T) {
    err := testSandboxDir(t, lockFileType, sandboxDirLock)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxDirNegative(t *testing.T) {
    fs := filesystem{}
    _, _, err := fs.sandboxURI("", lockFileType)
    if err == nil {
        t.Fatal("Empty sandbox IDs should not be allowed")
    }
}

func TestSandboxFileConfig(t *testing.T) {
    err := testSandboxFile(t, configFileType, sandboxFileConfig)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxFileState(t *testing.T) {
    err := testSandboxFile(t, stateFileType, sandboxFileState)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxFileLock(t *testing.T) {
    err := testSandboxFile(t, lockFileType, sandboxFileLock)
    if err != nil {
        t.Fatal(err)
    }
}

func TestSandboxFileNegative(t *testing.T) {
    fs := filesystem{}
    _, _, err := fs.sandboxURI("", lockFileType)
    if err == nil {
        t.Fatal("Empty sandbox IDs should not be allowed")
    }
}

func testStateValid(t *testing.T, stateStr types.StateString, expected bool) {
    state := &types.State{
        State: stateStr,
@@ -619,174 +534,6 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
    }
}

func TestSandboxSetSandboxStateFailingStoreSandboxResource(t *testing.T) {
    fs := &filesystem{}
    sandbox := &Sandbox{
        storage: fs,
    }

    err := sandbox.setSandboxState(types.StateReady)
    if err == nil {
        t.Fatal()
    }
}

func TestSandboxSetContainersStateFailingEmptySandboxID(t *testing.T) {
    sandbox := &Sandbox{
        storage: &filesystem{},
    }

    containers := map[string]*Container{
        "100": {
            id: "100",
            sandbox: sandbox,
        },
    }

    sandbox.containers = containers

    err := sandbox.setContainersState(types.StateReady)
    if err == nil {
        t.Fatal()
    }
}

func TestSandboxDeleteContainerStateSuccessful(t *testing.T) {
    contID := "100"

    fs := &filesystem{}
    sandbox := &Sandbox{
        id: testSandboxID,
        storage: fs,
    }

    path := filepath.Join(runStoragePath, testSandboxID, contID)
    err := os.MkdirAll(path, dirMode)
    if err != nil {
        t.Fatal(err)
    }

    stateFilePath := filepath.Join(path, stateFile)

    os.Remove(stateFilePath)

    _, err = os.Create(stateFilePath)
    if err != nil {
        t.Fatal(err)
    }

    _, err = os.Stat(stateFilePath)
    if err != nil {
        t.Fatal(err)
    }

    err = sandbox.deleteContainerState(contID)
    if err != nil {
        t.Fatal(err)
    }

    _, err = os.Stat(stateFilePath)
    if err == nil {
        t.Fatal()
    }
}

func TestSandboxDeleteContainerStateFailingEmptySandboxID(t *testing.T) {
    contID := "100"

    fs := &filesystem{}
    sandbox := &Sandbox{
        storage: fs,
    }

    err := sandbox.deleteContainerState(contID)
    if err == nil {
        t.Fatal()
    }
}

func TestSandboxDeleteContainersStateSuccessful(t *testing.T) {
    var err error

    containers := []ContainerConfig{
        {
            ID: "100",
        },
        {
            ID: "200",
        },
    }

    sandboxConfig := &SandboxConfig{
        Containers: containers,
    }

    fs := &filesystem{}
    sandbox := &Sandbox{
        id: testSandboxID,
        config: sandboxConfig,
        storage: fs,
    }

    for _, c := range containers {
        path := filepath.Join(runStoragePath, testSandboxID, c.ID)
        err = os.MkdirAll(path, dirMode)
        if err != nil {
            t.Fatal(err)
        }

        stateFilePath := filepath.Join(path, stateFile)

        os.Remove(stateFilePath)

        _, err = os.Create(stateFilePath)
        if err != nil {
            t.Fatal(err)
        }

        _, err = os.Stat(stateFilePath)
        if err != nil {
            t.Fatal(err)
        }
    }

    err = sandbox.deleteContainersState()
    if err != nil {
        t.Fatal(err)
    }

    for _, c := range containers {
        stateFilePath := filepath.Join(runStoragePath, testSandboxID, c.ID, stateFile)
        _, err = os.Stat(stateFilePath)
        if err == nil {
            t.Fatal()
        }
    }
}

func TestSandboxDeleteContainersStateFailingEmptySandboxID(t *testing.T) {
    containers := []ContainerConfig{
        {
            ID: "100",
        },
    }

    sandboxConfig := &SandboxConfig{
        Containers: containers,
    }

    fs := &filesystem{}
    sandbox := &Sandbox{
        config: sandboxConfig,
        storage: fs,
    }

    err := sandbox.deleteContainersState()
    if err == nil {
        t.Fatal()
    }
}

func TestGetContainer(t *testing.T) {
    containerIDs := []string{"abc", "123", "xyz", "rgb"}
    containers := map[string]*Container{}
@@ -837,8 +584,8 @@ func TestGetAllContainers(t *testing.T) {

func TestSetAnnotations(t *testing.T) {
    sandbox := Sandbox{
        ctx: context.Background(),
        id: "abcxyz123",
        storage: &filesystem{},
        annotationsLock: &sync.RWMutex{},
        config: &SandboxConfig{
            Annotations: map[string]string{
@@ -847,6 +594,12 @@ func TestSetAnnotations(t *testing.T) {
        },
    }

    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    if err != nil {
        t.Fatal(err)
    }
    sandbox.store = vcStore

    keyAnnotation := "annotation2"
    valueAnnotation := "xyz"
    newAnnotations := map[string]string{
@@ -947,23 +700,27 @@ func TestContainerSetStateBlockIndex(t *testing.T) {
    }
    defer cleanUp()

    fs := &filesystem{}
    sandbox.storage = fs
    sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    if err != nil {
        t.Fatal(err)
    }
    sandbox.store = sandboxStore

    c := sandbox.GetContainer("100")
    if c == nil {
        t.Fatal()
    }
    cImpl, ok := c.(*Container)
    assert.True(t, ok)

    path := filepath.Join(runStoragePath, testSandboxID, c.ID())
    err = os.MkdirAll(path, dirMode)
    containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID())
    if err != nil {
        t.Fatal(err)
    }
    cImpl.store = containerStore

    stateFilePath := filepath.Join(path, stateFile)

    os.Remove(stateFilePath)
    path := store.ContainerRuntimeRootPath(testSandboxID, c.ID())
    stateFilePath := filepath.Join(path, store.StateFile)

    f, err := os.Create(stateFilePath)
    if err != nil {
@@ -975,9 +732,6 @@ func TestContainerSetStateBlockIndex(t *testing.T) {
        Fstype: "vfs",
    }

    cImpl, ok := c.(*Container)
    assert.True(t, ok)

    cImpl.state = state

    stateData := `{
@@ -992,11 +746,6 @@ func TestContainerSetStateBlockIndex(t *testing.T) {
    }
    f.Close()

    _, err = os.Stat(stateFilePath)
    if err != nil {
        t.Fatal(err)
    }

    newIndex := 20
    if err := cImpl.setStateBlockIndex(newIndex); err != nil {
        t.Fatal(err)
@@ -1046,22 +795,27 @@ func TestContainerStateSetFstype(t *testing.T) {
    }
    defer cleanUp()

    fs := &filesystem{}
    sandbox.storage = fs
    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    if err != nil {
        t.Fatal(err)
    }
    sandbox.store = vcStore

    c := sandbox.GetContainer("100")
    if c == nil {
        t.Fatal()
    }
    cImpl, ok := c.(*Container)
    assert.True(t, ok)

    path := filepath.Join(runStoragePath, testSandboxID, c.ID())
    err = os.MkdirAll(path, dirMode)
    containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID())
    if err != nil {
        t.Fatal(err)
    }
    cImpl.store = containerStore

    stateFilePath := filepath.Join(path, stateFile)
    os.Remove(stateFilePath)
    path := store.ContainerRuntimeRootPath(testSandboxID, c.ID())
    stateFilePath := filepath.Join(path, store.StateFile)

    f, err := os.Create(stateFilePath)
    if err != nil {
@@ -1074,9 +828,6 @@ func TestContainerStateSetFstype(t *testing.T) {
        BlockIndex: 3,
    }

    cImpl, ok := c.(*Container)
    assert.True(t, ok)

    cImpl.state = state

    stateData := `{
@@ -1092,11 +843,6 @@ func TestContainerStateSetFstype(t *testing.T) {
    }
    f.Close()

    _, err = os.Stat(stateFilePath)
    if err != nil {
        t.Fatal(err)
    }

    newFstype := "ext4"
    if err := cImpl.setStateFstype(newFstype); err != nil {
        t.Fatal(err)
@@ -1141,7 +887,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
    testDeviceBDFPath := "0000:00:1c.0"

    devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices")
    err = os.MkdirAll(devicesDir, dirMode)
    err = os.MkdirAll(devicesDir, store.DirMode)
    assert.Nil(t, err)

    deviceFile := filepath.Join(devicesDir, testDeviceBDFPath)
@@ -1181,15 +927,16 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
    sandbox := Sandbox{
        id: "100",
        containers: containers,
        storage: &filesystem{},
        hypervisor: &mockHypervisor{},
        devManager: dm,
        ctx: context.Background(),
    }

    store, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    assert.Nil(t, err)
    sandbox.store = store

    containers[c.id].sandbox = &sandbox
    err = sandbox.storage.createAllResources(context.Background(), &sandbox)
    assert.Nil(t, err, "Error while create all resources for sandbox")

    err = sandbox.storeSandboxDevices()
    assert.Nil(t, err, "Error while store sandbox devices %s", err)
@@ -1554,7 +1301,6 @@ func TestContainerProcessIOStream(t *testing.T) {
}

func TestAttachBlockDevice(t *testing.T) {
    fs := &filesystem{}
    hypervisor := &mockHypervisor{}

    hConfig := HypervisorConfig{
@@ -1567,12 +1313,15 @@ func TestAttachBlockDevice(t *testing.T) {

    sandbox := &Sandbox{
        id: testSandboxID,
        storage: fs,
        hypervisor: hypervisor,
        config: sconfig,
        ctx: context.Background(),
    }

    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    assert.Nil(t, err)
    sandbox.store = vcStore

    contID := "100"
    container := Container{
        sandbox: sandbox,
@@ -1580,15 +1329,15 @@ func TestAttachBlockDevice(t *testing.T) {
    }

    // create state file
    path := filepath.Join(runStoragePath, testSandboxID, container.ID())
    err := os.MkdirAll(path, dirMode)
    path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
    err = os.MkdirAll(path, store.DirMode)
    if err != nil {
        t.Fatal(err)
    }

    defer os.RemoveAll(path)

    stateFilePath := filepath.Join(path, stateFile)
    stateFilePath := filepath.Join(path, store.StateFile)
    os.Remove(stateFilePath)

    _, err = os.Create(stateFilePath)
@@ -1640,7 +1389,6 @@ func TestAttachBlockDevice(t *testing.T) {
}

func TestPreAddDevice(t *testing.T) {
    fs := &filesystem{}
    hypervisor := &mockHypervisor{}

    hConfig := HypervisorConfig{
@@ -1655,13 +1403,16 @@ func TestPreAddDevice(t *testing.T) {
    // create a sandbox first
    sandbox := &Sandbox{
        id: testSandboxID,
        storage: fs,
        hypervisor: hypervisor,
        config: sconfig,
        devManager: dm,
        ctx: context.Background(),
    }

    vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
    assert.Nil(t, err)
    sandbox.store = vcStore

    contID := "100"
    container := Container{
        sandbox: sandbox,
@@ -1670,16 +1421,20 @@ func TestPreAddDevice(t *testing.T) {
    }
    container.state.State = types.StateReady

    containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id)
    assert.Nil(t, err)
    container.store = containerStore

    // create state file
    path := filepath.Join(runStoragePath, testSandboxID, container.ID())
    err := os.MkdirAll(path, dirMode)
    path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
    err = os.MkdirAll(path, store.DirMode)
    if err != nil {
        t.Fatal(err)
    }

    defer os.RemoveAll(path)

    stateFilePath := filepath.Join(path, stateFile)
    stateFilePath := filepath.Join(path, store.StateFile)
    os.Remove(stateFilePath)

    _, err = os.Create(stateFilePath)
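These test rewrites also show the two-level store layout: a sandbox gets a VCSandboxStore and each container gets its own VCContainerStore, which is what container.store.Store(store.Configuration, ...) in storeSandbox() depends on. A condensed sketch of the pairing, assuming only the constructors shown in these hunks:

package example

import (
    "context"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

// newStores builds the sandbox-level store and a per-container store
// beneath it, the same wiring the updated tests perform by hand.
func newStores(ctx context.Context, sandboxID, containerID string) (*store.VCStore, *store.VCStore, error) {
    sandboxStore, err := store.NewVCSandboxStore(ctx, sandboxID)
    if err != nil {
        return nil, nil, err
    }

    containerStore, err := store.NewVCContainerStore(ctx, sandboxID, containerID)
    if err != nil {
        return nil, nil, err
    }

    return sandboxStore, containerStore, nil
}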
@@ -14,6 +14,7 @@ import (
    "path/filepath"
    "testing"

    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/sirupsen/logrus"
)

@@ -46,12 +47,13 @@ var testHyperstartTtySocket = ""
// the next test to run.
func cleanUp() {
    globalSandboxList.removeSandbox(testSandboxID)
    store.DeleteAll()
    for _, dir := range []string{testDir, defaultSharedDir} {
        os.RemoveAll(dir)
        os.MkdirAll(dir, dirMode)
        os.MkdirAll(dir, store.DirMode)
    }

    os.Mkdir(filepath.Join(testDir, testBundle), dirMode)
    os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode)

    _, err := os.Create(filepath.Join(testDir, testImage))
    if err != nil {
@@ -82,7 +84,7 @@ func TestMain(m *testing.M) {
    }

    fmt.Printf("INFO: Creating virtcontainers test directory %s\n", testDir)
    err = os.MkdirAll(testDir, dirMode)
    err = os.MkdirAll(testDir, store.DirMode)
    if err != nil {
        fmt.Println("Could not create test directories:", err)
        os.Exit(1)
@@ -117,7 +119,7 @@ func TestMain(m *testing.M) {
        os.Exit(1)
    }

    err = os.Mkdir(filepath.Join(testDir, testBundle), dirMode)
    err = os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode)
    if err != nil {
        fmt.Println("Could not create test bundle directory:", err)
        os.RemoveAll(testDir)
@@ -125,16 +127,16 @@ func TestMain(m *testing.M) {
    }

    // allow the tests to run without affecting the host system.
    configStoragePath = filepath.Join(testDir, storagePathSuffix, "config")
    runStoragePath = filepath.Join(testDir, storagePathSuffix, "run")
    store.ConfigStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "config")
    store.RunStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "run")

    // set now that configStoragePath has been overridden.
    sandboxDirConfig = filepath.Join(configStoragePath, testSandboxID)
    sandboxFileConfig = filepath.Join(configStoragePath, testSandboxID, configFile)
    sandboxDirState = filepath.Join(runStoragePath, testSandboxID)
    sandboxDirLock = filepath.Join(runStoragePath, testSandboxID)
    sandboxFileState = filepath.Join(runStoragePath, testSandboxID, stateFile)
    sandboxFileLock = filepath.Join(runStoragePath, testSandboxID, lockFileName)
    sandboxDirConfig = filepath.Join(store.ConfigStoragePath, testSandboxID)
    sandboxFileConfig = filepath.Join(store.ConfigStoragePath, testSandboxID, store.ConfigurationFile)
    sandboxDirState = filepath.Join(store.RunStoragePath, testSandboxID)
    sandboxDirLock = filepath.Join(store.RunStoragePath, testSandboxID)
    sandboxFileState = filepath.Join(store.RunStoragePath, testSandboxID, store.StateFile)
    sandboxFileLock = filepath.Join(store.RunStoragePath, testSandboxID, store.LockFile)

    testHyperstartCtlSocket = filepath.Join(testDir, "test_hyper.sock")
    testHyperstartTtySocket = filepath.Join(testDir, "test_tty.sock")
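TestMain keeps the old trick of redirecting all on-disk state under the test directory, but the knobs now live in the store package. A sketch, assuming store.ConfigStoragePath and store.RunStoragePath remain plain assignable package-level variables, as the hunk above implies:

package example

import (
    "path/filepath"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

// redirectStorePaths points store-managed state away from the real
// /var/{lib,run}/vc roots so tests cannot touch the host system.
func redirectStorePaths(testDir string) {
    store.ConfigStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "config")
    store.RunStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "run")
}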
@@ -12,6 +12,7 @@ import (
    "time"

    "github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
    "github.com/kata-containers/runtime/virtcontainers/store"
    "github.com/sirupsen/logrus"
)

@@ -118,7 +119,14 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
        }
    }()

    if err = hypervisor.createSandbox(ctx, id, &config.HypervisorConfig, &filesystem{}); err != nil {
    vcStore, err := store.NewVCStore(ctx,
        store.SandboxConfigurationRoot(id),
        store.SandboxRuntimeRoot(id))
    if err != nil {
        return nil, err
    }

    if err = hypervisor.createSandbox(ctx, id, &config.HypervisorConfig, vcStore); err != nil {
        return nil, err
    }

@@ -180,7 +188,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
}

func buildVMSharePath(id string) string {
    return filepath.Join(RunVMStoragePath, id, "shared")
    return filepath.Join(store.RunVMStoragePath, id, "shared")
}

func (v *VM) logger() logrus.FieldLogger {
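NewVM is the one caller that builds a generic VCStore directly, naming the configuration and runtime roots itself, rather than going through the sandbox-scoped constructor used everywhere else in this commit; presumably NewVCSandboxStore is a thin wrapper over the same call, though that equivalence is an inference, not something this diff states. A sketch of the two constructors side by side:

package example

import (
    "context"

    "github.com/kata-containers/runtime/virtcontainers/store"
)

// twoWays returns stores for one sandbox ID built both ways; treating
// them as equivalent is an assumption inferred from how this commit
// uses them.
func twoWays(ctx context.Context, id string) (*store.VCStore, *store.VCStore, error) {
    // Generic constructor: both roots spelled out, as NewVM does.
    generic, err := store.NewVCStore(ctx,
        store.SandboxConfigurationRoot(id),
        store.SandboxRuntimeRoot(id))
    if err != nil {
        return nil, nil, err
    }

    // Sandbox-scoped convenience constructor, as newSandbox does.
    scoped, err := store.NewVCSandboxStore(ctx, id)
    if err != nil {
        return nil, nil, err
    }
    return generic, scoped, nil
}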