vc: deprecate hyperstart agent

The hyperstart agent has not been supported in kata since 2.1,
so remove it as a component of kata. Mentioned in issue #1113.

Fixes: #1419

Signed-off-by: Gabi Beyer <gabrielle.n.beyer@intel.com>
Authored by Gabi Beyer on 2019-03-25 12:31:15 -07:00; committed by Gabi Beyer
parent d5a759e1cf
commit d4ef9c05d7
21 changed files with 50 additions and 3944 deletions

View File

@ -60,7 +60,6 @@ const (
kataShimTableType = "kata"
// supported agent component types
hyperstartAgentTableType = "hyperstart"
kataAgentTableType = "kata"
// the maximum amount of PCI bridges that can be cold plugged in a VM
@ -639,15 +638,13 @@ func updateRuntimeConfigAgent(configPath string, tomlConf tomlConfig, config *oc
for k := range tomlConf.Agent {
switch k {
case hyperstartAgentTableType:
config.AgentType = vc.HyperstartAgent
config.AgentConfig = vc.HyperConfig{}
case kataAgentTableType:
config.AgentType = vc.KataContainersAgent
config.AgentConfig = vc.KataAgentConfig{
UseVSock: config.HypervisorConfig.UseVSock,
}
default:
return fmt.Errorf("%s agent type is not supported", k)
}
}
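
Since the rendered hunk above has lost its leading +/- markers, the surviving logic is easy to misread. Below is a sketch of what the agent switch in this function reduces to once the hyperstart case is dropped; it is reconstructed from the hunk as a fragment of the enclosing loop, not copied from the final file:

```Go
for k := range tomlConf.Agent {
	switch k {
	case kataAgentTableType:
		config.AgentType = vc.KataContainersAgent
		config.AgentConfig = vc.KataAgentConfig{
			UseVSock: config.HypervisorConfig.UseVSock,
		}
	default:
		// Any other agent table, including a leftover hyperstart
		// section, is now rejected.
		return fmt.Errorf("%s agent type is not supported", k)
	}
}
```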
@ -788,7 +785,7 @@ func initConfig() (config oci.RuntimeConfig, err error) {
return oci.RuntimeConfig{}, err
}
defaultAgentConfig = vc.HyperConfig{}
defaultAgentConfig = vc.KataAgentConfig{}
config = oci.RuntimeConfig{
HypervisorType: defaultHypervisor,

View File

@ -1364,7 +1364,7 @@ func TestDefaultMachineAccelerators(t *testing.T) {
func TestUpdateRuntimeConfiguration(t *testing.T) {
assert := assert.New(t)
assert.NotEqual(defaultAgent, vc.HyperstartAgent)
assert.Equal(defaultAgent, vc.KataContainersAgent)
config := oci.RuntimeConfig{}
@ -1433,7 +1433,7 @@ func TestUpdateRuntimeConfigurationFactoryConfig(t *testing.T) {
func TestUpdateRuntimeConfigurationInvalidKernelParams(t *testing.T) {
assert := assert.New(t)
assert.NotEqual(defaultAgent, vc.HyperstartAgent)
assert.Equal(defaultAgent, vc.KataContainersAgent)
config := oci.RuntimeConfig{}

View File

@ -41,9 +41,6 @@ const (
// NoopAgentType is the No-Op agent.
NoopAgentType AgentType = "noop"
// HyperstartAgent is the Hyper hyperstart agent.
HyperstartAgent AgentType = "hyperstart"
// KataContainersAgent is the Kata Containers agent.
KataContainersAgent AgentType = "kata"
@ -61,9 +58,6 @@ func (agentType *AgentType) Set(value string) error {
case "noop":
*agentType = NoopAgentType
return nil
case "hyperstart":
*agentType = HyperstartAgent
return nil
case "kata":
*agentType = KataContainersAgent
return nil
@ -77,8 +71,6 @@ func (agentType *AgentType) String() string {
switch *agentType {
case NoopAgentType:
return string(NoopAgentType)
case HyperstartAgent:
return string(HyperstartAgent)
case KataContainersAgent:
return string(KataContainersAgent)
default:
@ -91,8 +83,6 @@ func newAgent(agentType AgentType) agent {
switch agentType {
case NoopAgentType:
return &noopAgent{}
case HyperstartAgent:
return &hyper{}
case KataContainersAgent:
return &kataAgent{}
default:
@ -105,13 +95,6 @@ func newAgentConfig(agentType AgentType, agentConfig interface{}) interface{} {
switch agentType {
case NoopAgentType:
return nil
case HyperstartAgent:
var hyperConfig HyperConfig
err := mapstructure.Decode(agentConfig, &hyperConfig)
if err != nil {
return err
}
return hyperConfig
case KataContainersAgent:
var kataAgentConfig KataAgentConfig
err := mapstructure.Decode(agentConfig, &kataAgentConfig)

View File

@ -27,10 +27,6 @@ func TestSetNoopAgentType(t *testing.T) {
testSetAgentType(t, "noop", NoopAgentType)
}
func TestSetHyperstartAgentType(t *testing.T) {
testSetAgentType(t, "hyperstart", HyperstartAgent)
}
func TestSetKataAgentType(t *testing.T) {
testSetAgentType(t, "kata", KataContainersAgent)
}
@ -43,8 +39,7 @@ func TestSetUnknownAgentType(t *testing.T) {
t.Fatal()
}
if agentType == NoopAgentType ||
agentType == HyperstartAgent {
if agentType == NoopAgentType {
t.Fatal()
}
}
@ -60,10 +55,6 @@ func TestStringFromNoopAgentType(t *testing.T) {
testStringFromAgentType(t, NoopAgentType, "noop")
}
func TestStringFromHyperstartAgentType(t *testing.T) {
testStringFromAgentType(t, HyperstartAgent, "hyperstart")
}
func TestStringFromKataAgentType(t *testing.T) {
testStringFromAgentType(t, KataContainersAgent, "kata")
}
@ -85,10 +76,6 @@ func TestNewAgentFromNoopAgentType(t *testing.T) {
testNewAgentFromAgentType(t, NoopAgentType, &noopAgent{})
}
func TestNewAgentFromHyperstartAgentType(t *testing.T) {
testNewAgentFromAgentType(t, HyperstartAgent, &hyper{})
}
func TestNewAgentFromKataAgentType(t *testing.T) {
testNewAgentFromAgentType(t, KataContainersAgent, &kataAgent{})
}
@ -116,17 +103,6 @@ func TestNewAgentConfigFromNoopAgentType(t *testing.T) {
testNewAgentConfig(t, sandboxConfig, agentConfig)
}
func TestNewAgentConfigFromHyperstartAgentType(t *testing.T) {
agentConfig := HyperConfig{}
sandboxConfig := SandboxConfig{
AgentType: HyperstartAgent,
AgentConfig: agentConfig,
}
testNewAgentConfig(t, sandboxConfig, agentConfig)
}
func TestNewAgentConfigFromKataAgentType(t *testing.T) {
agentConfig := KataAgentConfig{UseVSock: true}

View File

@ -17,7 +17,6 @@ import (
"syscall"
"testing"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
@ -94,87 +93,6 @@ func newTestSandboxConfigNoop() SandboxConfig {
return sandboxConfig
}
func newTestSandboxConfigHyperstartAgent() SandboxConfig {
// Define the container command and bundle.
container := ContainerConfig{
ID: containerID,
RootFs: RootFs{Target: filepath.Join(testDir, testBundle), Mounted: true},
Cmd: newBasicTestCmd(),
Annotations: containerAnnotations,
}
// Sets the hypervisor configuration.
hypervisorConfig := HypervisorConfig{
KernelPath: filepath.Join(testDir, testKernel),
ImagePath: filepath.Join(testDir, testImage),
HypervisorPath: filepath.Join(testDir, testHypervisor),
}
agentConfig := HyperConfig{
SockCtlName: testHyperstartCtlSocket,
SockTtyName: testHyperstartTtySocket,
}
sandboxConfig := SandboxConfig{
ID: testSandboxID,
HypervisorType: MockHypervisor,
HypervisorConfig: hypervisorConfig,
AgentType: HyperstartAgent,
AgentConfig: agentConfig,
Containers: []ContainerConfig{container},
Annotations: sandboxAnnotations,
ProxyType: NoopProxyType,
}
return sandboxConfig
}
func newTestSandboxConfigHyperstartAgentDefaultNetwork() SandboxConfig {
// Define the container command and bundle.
container := ContainerConfig{
ID: containerID,
RootFs: RootFs{Target: filepath.Join(testDir, testBundle), Mounted: true},
Cmd: newBasicTestCmd(),
Annotations: containerAnnotations,
}
// Sets the hypervisor configuration.
hypervisorConfig := HypervisorConfig{
KernelPath: filepath.Join(testDir, testKernel),
ImagePath: filepath.Join(testDir, testImage),
HypervisorPath: filepath.Join(testDir, testHypervisor),
}
agentConfig := HyperConfig{
SockCtlName: testHyperstartCtlSocket,
SockTtyName: testHyperstartTtySocket,
}
netConfig := NetworkConfig{}
sandboxConfig := SandboxConfig{
ID: testSandboxID,
HypervisorType: MockHypervisor,
HypervisorConfig: hypervisorConfig,
AgentType: HyperstartAgent,
AgentConfig: agentConfig,
NetworkConfig: netConfig,
Containers: []ContainerConfig{container},
Annotations: sandboxAnnotations,
ProxyType: NoopProxyType,
}
return sandboxConfig
}
func newTestSandboxConfigKataAgent() SandboxConfig {
// Sets the hypervisor configuration.
hypervisorConfig := HypervisorConfig{
@ -227,39 +145,6 @@ func testGenerateCCProxySockDir() (string, error) {
return dir, nil
}
func TestCreateSandboxHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
p, err := CreateSandbox(context.Background(), config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
_, err = os.Stat(sandboxDir)
if err != nil {
t.Fatal(err)
}
}
func TestCreateSandboxKataAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@ -340,51 +225,6 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) {
}
}
func TestDeleteSandboxHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
ctx := context.Background()
p, err := CreateSandbox(ctx, config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
_, err = os.Stat(sandboxDir)
if err != nil {
t.Fatal(err)
}
p, err = DeleteSandbox(ctx, p.ID())
if p == nil || err != nil {
t.Fatal(err)
}
_, err = os.Stat(sandboxDir)
if err == nil {
t.Fatal(err)
}
}
func TestDeleteSandboxKataAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@ -460,42 +300,6 @@ func TestStartSandboxNoopAgentSuccessful(t *testing.T) {
}
}
func TestStartSandboxHyperstartAgentSuccessful(t *testing.T) {
defer cleanUp()
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, _, err := createAndStartSandbox(ctx, config)
if p == nil || err != nil {
t.Fatal(err)
}
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
}
func TestStartSandboxKataAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@ -534,7 +338,10 @@ func TestStartSandboxKataAgentSuccessful(t *testing.T) {
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
// TODO: defaultSharedDir is a hyper var = /run/hyper/shared/sandboxes
// do we need to unmount sandboxes and containers?
bindUnmountAllRootfs(ctx, testDir, pImpl)
}
func TestStartSandboxFailing(t *testing.T) {
@ -627,42 +434,6 @@ func TestPauseThenResumeSandboxNoopAgentSuccessful(t *testing.T) {
}
}
func TestStopSandboxHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, _, err := createAndStartSandbox(ctx, config)
if p == nil || err != nil {
t.Fatal(err)
}
p, err = StopSandbox(ctx, p.ID())
if p == nil || err != nil {
t.Fatal(err)
}
}
func TestStopSandboxKataAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@ -733,48 +504,6 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) {
}
}
func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, err := RunSandbox(ctx, config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
_, err = os.Stat(sandboxDir)
if err != nil {
t.Fatal(err)
}
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
}
func TestRunSandboxKataAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@ -819,7 +548,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
bindUnmountAllRootfs(ctx, testDir, pImpl)
}
func TestRunSandboxFailing(t *testing.T) {
@ -1322,118 +1051,6 @@ func TestStopContainerNoopAgentSuccessful(t *testing.T) {
}
}
func TestStartStopContainerHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
contID := "100"
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, sandboxDir, err := createAndStartSandbox(ctx, config)
if p == nil || err != nil {
t.Fatal(err)
}
contConfig := newTestContainerConfigNoop(contID)
_, c, err := CreateContainer(ctx, p.ID(), contConfig)
if c == nil || err != nil {
t.Fatal(err)
}
contDir := filepath.Join(sandboxDir, contID)
_, err = os.Stat(contDir)
if err != nil {
t.Fatal(err)
}
c, err = StartContainer(ctx, p.ID(), contID)
if c == nil || err != nil {
t.Fatal(err)
}
c, err = StopContainer(ctx, p.ID(), contID)
if c == nil || err != nil {
t.Fatal(err)
}
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
}
func TestStartStopSandboxHyperstartAgentSuccessfulWithDefaultNetwork(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
config := newTestSandboxConfigHyperstartAgentDefaultNetwork()
n, err := ns.NewNS()
if err != nil {
t.Fatal(err)
}
defer n.Close()
config.NetworkConfig.NetNSPath = n.Path()
config.NetworkConfig.NetNsCreated = true
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, _, err := createAndStartSandbox(ctx, config)
if p == nil || err != nil {
t.Fatal(err)
}
v, err := StopSandbox(ctx, p.ID())
if v == nil || err != nil {
t.Fatal(err)
}
v, err = DeleteSandbox(ctx, p.ID())
if v == nil || err != nil {
t.Fatal(err)
}
}
func TestStopContainerFailingNoSandbox(t *testing.T) {
defer cleanUp()
@ -1549,74 +1166,6 @@ func TestEnterContainerNoopAgentSuccessful(t *testing.T) {
}
}
func TestEnterContainerHyperstartAgentSuccessful(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
contID := "100"
config := newTestSandboxConfigHyperstartAgent()
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(sockDir)
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
ctx := context.Background()
p, sandboxDir, err := createAndStartSandbox(ctx, config)
if p == nil || err != nil {
t.Fatal(err)
}
contConfig := newTestContainerConfigNoop(contID)
_, _, err = CreateContainer(ctx, p.ID(), contConfig)
if err != nil {
t.Fatal(err)
}
contDir := filepath.Join(sandboxDir, contID)
_, err = os.Stat(contDir)
if err != nil {
t.Fatal(err)
}
_, err = StartContainer(ctx, p.ID(), contID)
if err != nil {
t.Fatal(err)
}
cmd := newBasicTestCmd()
_, _, _, err = EnterContainer(ctx, p.ID(), contID, cmd)
if err != nil {
t.Fatal(err)
}
_, err = StopContainer(ctx, p.ID(), contID)
if err != nil {
t.Fatal(err)
}
pImpl, ok := p.(*Sandbox)
assert.True(t, ok)
bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
}
func TestEnterContainerFailingNoSandbox(t *testing.T) {
defer cleanUp()
contID := "100"
@ -2195,27 +1744,6 @@ func createStartStopDeleteContainers(b *testing.B, sandboxConfig SandboxConfig,
}
}
func BenchmarkCreateStartStopDeleteSandboxQemuHypervisorHyperstartAgentNetworkNoop(b *testing.B) {
for i := 0; i < b.N; i++ {
sandboxConfig := createNewSandboxConfig(QemuHypervisor, HyperstartAgent, HyperConfig{})
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(sockDir)
var t testing.T
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(&t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
createStartStopDeleteSandbox(b, sandboxConfig)
}
}
func BenchmarkCreateStartStopDeleteSandboxQemuHypervisorNoopAgentNetworkNoop(b *testing.B) {
for i := 0; i < b.N; i++ {
sandboxConfig := createNewSandboxConfig(QemuHypervisor, NoopAgentType, nil)
@ -2230,50 +1758,6 @@ func BenchmarkCreateStartStopDeleteSandboxMockHypervisorNoopAgentNetworkNoop(b *
}
}
func BenchmarkStartStop1ContainerQemuHypervisorHyperstartAgentNetworkNoop(b *testing.B) {
for i := 0; i < b.N; i++ {
sandboxConfig := createNewSandboxConfig(QemuHypervisor, HyperstartAgent, HyperConfig{})
contConfigs := createNewContainerConfigs(1)
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(sockDir)
var t testing.T
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(&t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
createStartStopDeleteContainers(b, sandboxConfig, contConfigs)
}
}
func BenchmarkStartStop10ContainerQemuHypervisorHyperstartAgentNetworkNoop(b *testing.B) {
for i := 0; i < b.N; i++ {
sandboxConfig := createNewSandboxConfig(QemuHypervisor, HyperstartAgent, HyperConfig{})
contConfigs := createNewContainerConfigs(10)
sockDir, err := testGenerateCCProxySockDir()
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(sockDir)
var t testing.T
testCCProxySockPath := fmt.Sprintf(testCCProxySockPathTempl, sockDir)
noopProxyURL = testCCProxyURLUnixScheme + testCCProxySockPath
proxy := mock.NewCCProxyMock(&t, testCCProxySockPath)
proxy.Start()
defer proxy.Stop()
createStartStopDeleteContainers(b, sandboxConfig, contConfigs)
}
}
func TestFetchSandbox(t *testing.T) {
defer cleanUp()

View File

@ -193,9 +193,6 @@ const (
// NoopAgentType is the No-Op agent.
NoopAgentType AgentType = "noop"
// HyperstartAgent is the Hyper hyperstart agent.
HyperstartAgent AgentType = "hyperstart"
// KataContainersAgent is the Kata Containers agent.
KataContainersAgent AgentType = "kata"
@ -848,7 +845,7 @@ func ProcessListContainer(sandboxID, containerID string, options ProcessListOpti
```Go
// This example creates and starts a single container sandbox,
// using qemu as the hypervisor and hyperstart as the VM agent.
// using qemu as the hypervisor and kata as the VM agent.
func Example_createAndStartSandbox() {
envs := []vc.EnvVar{
{
@ -877,8 +874,8 @@ func Example_createAndStartSandbox() {
HypervisorPath: "/usr/bin/qemu-system-x86_64",
}
// Use hyperstart default values for the agent.
agConfig := vc.HyperConfig{}
// Use kata default values for the agent.
agConfig := vc.KataAgentConfig{}
// VM resources
vmConfig := vc.Resources{
@ -889,14 +886,14 @@ func Example_createAndStartSandbox() {
// The sandbox configuration:
// - One container
// - Hypervisor is QEMU
// - Agent is hyperstart
// - Agent is kata
sandboxConfig := vc.SandboxConfig{
VMConfig: vmConfig,
HypervisorType: vc.QemuHypervisor,
HypervisorConfig: hypervisorConfig,
AgentType: vc.HyperstartAgent,
AgentType: vc.KataContainersAgent,
AgentConfig: agConfig,
Containers: []vc.ContainerConfig{container},

View File

@ -17,7 +17,7 @@ import (
var containerRootfs = vc.RootFs{Target: "/var/lib/container/bundle/", Mounted: true}
// This example creates and starts a single container sandbox,
// using qemu as the hypervisor and hyperstart as the VM agent.
// using qemu as the hypervisor and kata as the VM agent.
func Example_createAndStartSandbox() {
envs := []types.EnvVar{
{
@ -47,18 +47,18 @@ func Example_createAndStartSandbox() {
MemorySize: 1024,
}
// Use hyperstart default values for the agent.
agConfig := vc.HyperConfig{}
// Use kata default values for the agent.
agConfig := vc.KataAgentConfig{}
// The sandbox configuration:
// - One container
// - Hypervisor is QEMU
// - Agent is hyperstart
// - Agent is kata
sandboxConfig := vc.SandboxConfig{
HypervisorType: vc.QemuHypervisor,
HypervisorConfig: hypervisorConfig,
AgentType: vc.HyperstartAgent,
AgentType: vc.KataContainersAgent,
AgentConfig: agConfig,
Containers: []vc.ContainerConfig{container},

View File

@ -76,18 +76,6 @@ var sandboxConfigFlags = []cli.Flag{
Usage: "the shim binary path",
},
cli.StringFlag{
Name: "hyper-ctl-sock-name",
Value: "",
Usage: "the hyperstart control socket name",
},
cli.StringFlag{
Name: "hyper-tty-sock-name",
Value: "",
Usage: "the hyperstart tty socket name",
},
cli.UintFlag{
Name: "cpus",
Value: 0,
@ -133,8 +121,6 @@ func buildKernelParams(config *vc.HypervisorConfig) error {
func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
var agConfig interface{}
hyperCtlSockName := context.String("hyper-ctl-sock-name")
hyperTtySockName := context.String("hyper-tty-sock-name")
proxyPath := context.String("proxy-path")
shimPath := context.String("shim-path")
machineType := context.String("machine-type")
@ -171,16 +157,7 @@ func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
}
netConfig := vc.NetworkConfig{}
switch *agentType {
case vc.HyperstartAgent:
agConfig = vc.HyperConfig{
SockCtlName: hyperCtlSockName,
SockTtyName: hyperTtySockName,
}
default:
agConfig = nil
}
proxyConfig := getProxyConfig(*proxyType, proxyPath)

File diff suppressed because it is too large

View File

@ -1,304 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"reflect"
"testing"
"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/stretchr/testify/assert"
"github.com/vishvananda/netlink"
)
var testRouteDest = "192.168.10.1/32"
var testRouteGateway = "192.168.0.0"
var testRouteDeviceName = "test_eth0"
var testRouteDestIPv6 = "2001:db8::/32"
func TestHyperstartGenerateSocketsSuccessful(t *testing.T) {
config := HyperConfig{
SockCtlName: "ctlSock",
SockTtyName: "ttySock",
}
sandbox := &Sandbox{
id: testSandboxID,
}
h := &hyper{}
h.generateSockets(sandbox, config)
expectedSockets := []types.Socket{
{
DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 0),
ID: fmt.Sprintf(defaultIDTemplate, 0),
HostPath: config.SockCtlName,
Name: fmt.Sprintf(defaultChannelTemplate, 0),
},
{
DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 1),
ID: fmt.Sprintf(defaultIDTemplate, 1),
HostPath: config.SockTtyName,
Name: fmt.Sprintf(defaultChannelTemplate, 1),
},
}
if !reflect.DeepEqual(expectedSockets, h.sockets) {
t.Fatalf("Expecting %+v, Got %+v", expectedSockets, h.sockets)
}
}
func TestHyperstartGenerateSocketsSuccessfulNoPathProvided(t *testing.T) {
config := HyperConfig{}
sandbox := &Sandbox{
id: testSandboxID,
}
h := &hyper{}
h.generateSockets(sandbox, config)
expectedSockets := []types.Socket{
{
DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 0),
ID: fmt.Sprintf(defaultIDTemplate, 0),
HostPath: fmt.Sprintf(defaultSockPathTemplates[0], store.RunStoragePath, sandbox.id),
Name: fmt.Sprintf(defaultChannelTemplate, 0),
},
{
DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 1),
ID: fmt.Sprintf(defaultIDTemplate, 1),
HostPath: fmt.Sprintf(defaultSockPathTemplates[1], store.RunStoragePath, sandbox.id),
Name: fmt.Sprintf(defaultChannelTemplate, 1),
},
}
if !reflect.DeepEqual(expectedSockets, h.sockets) {
t.Fatalf("Expecting %+v, Got %+v", expectedSockets, h.sockets)
}
}
func testProcessHyperRoute(t *testing.T, route netlink.Route, deviceName string, expected *hyperstart.Route) {
h := &hyper{}
hyperRoute := h.processHyperRoute(route, deviceName)
if expected == nil {
if hyperRoute != nil {
t.Fatalf("Expecting route to be nil, Got %+v", hyperRoute)
} else {
return
}
}
// At this point, we know that "expected" != nil.
if !reflect.DeepEqual(*expected, *hyperRoute) {
t.Fatalf("Expecting %+v, Got %+v", *expected, *hyperRoute)
}
}
func TestProcessHyperRouteEmptyGWSuccessful(t *testing.T) {
expected := &hyperstart.Route{
Dest: testRouteDest,
Gateway: "",
Device: testRouteDeviceName,
}
_, dest, err := net.ParseCIDR(testRouteDest)
if err != nil {
t.Fatal(err)
}
route := netlink.Route{
Dst: dest,
Gw: net.IP{},
}
testProcessHyperRoute(t, route, testRouteDeviceName, expected)
}
func TestProcessHyperRouteEmptyDestSuccessful(t *testing.T) {
expected := &hyperstart.Route{
Dest: defaultRouteLabel,
Gateway: testRouteGateway,
Device: testRouteDeviceName,
}
_, dest, err := net.ParseCIDR(defaultRouteDest)
if err != nil {
t.Fatal(err)
}
route := netlink.Route{
Dst: dest,
Gw: net.ParseIP(testRouteGateway),
}
testProcessHyperRoute(t, route, testRouteDeviceName, expected)
}
func TestProcessHyperRouteDestIPv6Failure(t *testing.T) {
_, dest, err := net.ParseCIDR(testRouteDestIPv6)
if err != nil {
t.Fatal(err)
}
route := netlink.Route{
Dst: dest,
Gw: net.IP{},
}
testProcessHyperRoute(t, route, testRouteDeviceName, nil)
}
func TestHyperPathAPI(t *testing.T) {
assert := assert.New(t)
h1 := &hyper{}
h2 := &hyper{}
id := "foobar"
// getVMPath
path1 := h1.getVMPath(id)
path2 := h2.getVMPath(id)
assert.Equal(path1, path2)
// getSharePath
path1 = h1.getSharePath(id)
path2 = h2.getSharePath(id)
assert.Equal(path1, path2)
}
func TestHyperConfigure(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "hyperstart-test")
assert.Nil(err)
h := &hyper{}
m := &mockHypervisor{}
c := HyperConfig{}
id := "foobar"
invalidAgent := KataAgentConfig{}
err = h.configure(m, id, dir, true, invalidAgent)
assert.Nil(err)
err = h.configure(m, id, dir, true, c)
assert.Nil(err)
err = h.configure(m, id, dir, false, c)
assert.Nil(err)
}
func TestHyperReseedAPI(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
err := h.reseedRNG([]byte{})
assert.Nil(err)
}
func TestHyperUpdateInterface(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
_, err := h.updateInterface(nil)
assert.Nil(err)
}
func TestHyperListInterfaces(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
_, err := h.listInterfaces()
assert.Nil(err)
}
func TestHyperUpdateRoutes(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
_, err := h.updateRoutes(nil)
assert.Nil(err)
}
func TestHyperListRoutes(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
_, err := h.listRoutes()
assert.Nil(err)
}
func TestHyperSetProxy(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
p := &ccProxy{}
s := &Sandbox{
ctx: context.Background(),
}
vcStore, err := store.NewVCSandboxStore(s.ctx, "foobar")
assert.Nil(err)
s.store = vcStore
err = h.setProxy(s, p, 0, "")
assert.Error(err)
}
func TestHyperGetAgentUrl(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
url, err := h.getAgentURL()
assert.Nil(err)
assert.Empty(url)
}
func TestHyperCopyFile(t *testing.T) {
assert := assert.New(t)
h := &hyper{}
err := h.copyFile("", "")
assert.Nil(err)
}
func TestHyperCleanupSandbox(t *testing.T) {
assert := assert.New(t)
defaultSharedDirSaved := defaultSharedDir
defaultSharedDir, _ = ioutil.TempDir("", "hyper-cleanup")
defer func() {
defaultSharedDir = defaultSharedDirSaved
}()
s := Sandbox{
id: "testFoo",
}
dir := path.Join(defaultSharedDir, s.id)
err := os.MkdirAll(dir, 0777)
assert.Nil(err)
h := &hyper{}
h.cleanup(s.id)
if _, err = os.Stat(dir); os.IsExist(err) {
t.Fatalf("%s still exists\n", dir)
}
}

View File

@ -76,9 +76,11 @@ var (
kataNvdimmDevType = "nvdimm"
sharedDir9pOptions = []string{"trans=virtio,version=9p2000.L,cache=mmap", "nodev"}
shmDir = "shm"
ephemeralPath = filepath.Join(kataGuestSandboxDir, KataEphemeralDevType)
kataEphemeralDevType = "ephemeral"
ephemeralPath = filepath.Join(kataGuestSandboxDir, kataEphemeralDevType)
grpcMaxDataSize = int64(1024 * 1024)
localDirOptions = []string{"mode=0777"}
maxHostnameLen = 64
)
// KataAgentConfig is a structure storing information needed

View File

@ -653,10 +653,6 @@ func TestAgentConfigure(t *testing.T) {
c := KataAgentConfig{}
id := "foobar"
invalidAgent := HyperConfig{}
err = k.configure(h, id, dir, true, invalidAgent)
assert.Error(err)
err = k.configure(h, id, dir, true, c)
assert.Nil(err)

View File

@ -1,538 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package hyperstart
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"net"
"sync"
"time"
"github.com/sirupsen/logrus"
)
// Control command IDs
// Need to be in sync with hyperstart/src/api.h
const (
Version = "version"
StartSandbox = "startsandbox"
DestroySandbox = "destroysandbox"
ExecCmd = "execcmd"
Ready = "ready"
Ack = "ack"
Error = "error"
WinSize = "winsize"
Ping = "ping"
FinishSandbox = "finishsandbox"
Next = "next"
WriteFile = "writefile"
ReadFile = "readfile"
NewContainer = "newcontainer"
KillContainer = "killcontainer"
OnlineCPUMem = "onlinecpumem"
SetupInterface = "setupinterface"
SetupRoute = "setuproute"
RemoveContainer = "removecontainer"
PsContainer = "pscontainer"
)
// CodeList is the map making the relation between a string command
// and its corresponding code.
var CodeList = map[string]uint32{
Version: VersionCode,
StartSandbox: StartSandboxCode,
DestroySandbox: DestroySandboxCode,
ExecCmd: ExecCmdCode,
Ready: ReadyCode,
Ack: AckCode,
Error: ErrorCode,
WinSize: WinsizeCode,
Ping: PingCode,
Next: NextCode,
WriteFile: WriteFileCode,
ReadFile: ReadFileCode,
NewContainer: NewContainerCode,
KillContainer: KillContainerCode,
OnlineCPUMem: OnlineCPUMemCode,
SetupInterface: SetupInterfaceCode,
SetupRoute: SetupRouteCode,
RemoveContainer: RemoveContainerCode,
PsContainer: PsContainerCode,
}
// Values related to the communication on control channel.
const (
CtlHdrSize = 8
CtlHdrLenOffset = 4
)
// Values related to the communication on tty channel.
const (
TtyHdrSize = 12
TtyHdrLenOffset = 8
)
type connState struct {
sync.Mutex
opened bool
}
func (c *connState) close() {
c.Lock()
defer c.Unlock()
c.opened = false
}
func (c *connState) open() {
c.Lock()
defer c.Unlock()
c.opened = true
}
func (c *connState) closed() bool {
c.Lock()
defer c.Unlock()
return !c.opened
}
// Hyperstart is the base structure for hyperstart.
type Hyperstart struct {
ctlSerial, ioSerial string
sockType string
ctl, io net.Conn
ctlState, ioState connState
// ctl access is arbitrated by ctlMutex. We can only allow a single
// "transaction" (write command + read answer) at a time
ctlMutex sync.Mutex
ctlMulticast *multicast
ctlChDone chan interface{}
}
var hyperLog = logrus.FieldLogger(logrus.New())
// SetLogger sets the logger for hyperstart package.
func SetLogger(ctx context.Context, logger logrus.FieldLogger) {
hyperLog = logger.WithFields(logrus.Fields{
"source": "virtcontainers",
"subsystem": "hyperstart",
})
}
// NewHyperstart returns a new hyperstart structure.
func NewHyperstart(ctlSerial, ioSerial, sockType string) *Hyperstart {
return &Hyperstart{
ctlSerial: ctlSerial,
ioSerial: ioSerial,
sockType: sockType,
}
}
// GetCtlSock returns the internal CTL sock.
func (h *Hyperstart) GetCtlSock() net.Conn {
return h.ctl
}
// GetIoSock returns the internal IO sock.
func (h *Hyperstart) GetIoSock() net.Conn {
return h.io
}
// GetCtlSockPath returns the internal CTL sock path.
func (h *Hyperstart) GetCtlSockPath() string {
return h.ctlSerial
}
// GetIoSockPath returns the internal IO sock path.
func (h *Hyperstart) GetIoSockPath() string {
return h.ioSerial
}
// GetSockType returns the internal sock type.
func (h *Hyperstart) GetSockType() string {
return h.sockType
}
// OpenSocketsNoMulticast opens both CTL and IO sockets, without
// starting the multicast.
func (h *Hyperstart) OpenSocketsNoMulticast() error {
var err error
h.ctl, err = net.Dial(h.sockType, h.ctlSerial)
if err != nil {
return err
}
h.ctlState.open()
h.io, err = net.Dial(h.sockType, h.ioSerial)
if err != nil {
h.ctl.Close()
return err
}
h.ioState.open()
return nil
}
// OpenSockets opens both CTL and IO sockets.
func (h *Hyperstart) OpenSockets() error {
if err := h.OpenSocketsNoMulticast(); err != nil {
return err
}
h.ctlChDone = make(chan interface{})
h.ctlMulticast = startCtlMonitor(h.ctl, h.ctlChDone)
return nil
}
// CloseSockets closes both CTL and IO sockets.
func (h *Hyperstart) CloseSockets() error {
if !h.ctlState.closed() {
if h.ctlChDone != nil {
// Wait for the CTL channel to be terminated.
select {
case <-h.ctlChDone:
break
case <-time.After(time.Duration(3) * time.Second):
return fmt.Errorf("CTL channel did not end as expected")
}
}
err := h.ctl.Close()
if err != nil {
return err
}
h.ctlState.close()
}
if !h.ioState.closed() {
err := h.io.Close()
if err != nil {
return err
}
h.ioState.close()
}
h.ctlMulticast = nil
return nil
}
// SetDeadline sets a timeout for CTL connection.
func (h *Hyperstart) SetDeadline(t time.Time) error {
err := h.ctl.SetDeadline(t)
if err != nil {
return err
}
return nil
}
// IsStarted reports whether the CTL connection is alive, by pinging the agent with a short timeout.
func (h *Hyperstart) IsStarted() bool {
ret := false
timeoutDuration := 1 * time.Second
if h.ctlState.closed() {
return ret
}
h.SetDeadline(time.Now().Add(timeoutDuration))
_, err := h.SendCtlMessage(Ping, nil)
if err == nil {
ret = true
}
h.SetDeadline(time.Time{})
if !ret {
h.CloseSockets()
}
return ret
}
// FormatMessage formats hyperstart messages.
func FormatMessage(payload interface{}) ([]byte, error) {
var payloadSlice []byte
var err error
if payload != nil {
switch p := payload.(type) {
case string:
payloadSlice = []byte(p)
default:
payloadSlice, err = json.Marshal(p)
if err != nil {
return nil, err
}
}
}
return payloadSlice, nil
}
// ReadCtlMessage reads a hyperstart message from conn and returns a decoded message.
//
// This is a low level function, for a full and safe transaction on the
// hyperstart control serial link, use SendCtlMessage.
func ReadCtlMessage(conn net.Conn) (*DecodedMessage, error) {
needRead := CtlHdrSize
length := 0
read := 0
buf := make([]byte, 512)
res := []byte{}
for read < needRead {
want := needRead - read
if want > 512 {
want = 512
}
nr, err := conn.Read(buf[:want])
if err != nil {
return nil, err
}
res = append(res, buf[:nr]...)
read = read + nr
if length == 0 && read >= CtlHdrSize {
length = int(binary.BigEndian.Uint32(res[CtlHdrLenOffset:CtlHdrSize]))
if length > CtlHdrSize {
needRead = length
}
}
}
return &DecodedMessage{
Code: binary.BigEndian.Uint32(res[:CtlHdrLenOffset]),
Message: res[CtlHdrSize:],
}, nil
}
// WriteCtlMessage writes a hyperstart message to conn.
//
// This is a low level function, for a full and safe transaction on the
// hyperstart control serial link, use SendCtlMessage.
func (h *Hyperstart) WriteCtlMessage(conn net.Conn, m *DecodedMessage) error {
length := len(m.Message) + CtlHdrSize
// XXX: Support sending messages by chunks to support messages over
// 10240 bytes. That limit is from hyperstart src/init.c,
// hyper_channel_ops, rbuf_size.
if length > 10240 {
return fmt.Errorf("message too long %d", length)
}
msg := make([]byte, length)
binary.BigEndian.PutUint32(msg[:], m.Code)
binary.BigEndian.PutUint32(msg[CtlHdrLenOffset:], uint32(length))
copy(msg[CtlHdrSize:], m.Message)
_, err := conn.Write(msg)
if err != nil {
return err
}
return nil
}
// ReadIoMessageWithConn returns data coming from the specified IO channel.
func ReadIoMessageWithConn(conn net.Conn) (*TtyMessage, error) {
needRead := TtyHdrSize
length := 0
read := 0
buf := make([]byte, 512)
res := []byte{}
for read < needRead {
want := needRead - read
if want > 512 {
want = 512
}
nr, err := conn.Read(buf[:want])
if err != nil {
return nil, err
}
res = append(res, buf[:nr]...)
read = read + nr
if length == 0 && read >= TtyHdrSize {
length = int(binary.BigEndian.Uint32(res[TtyHdrLenOffset:TtyHdrSize]))
if length > TtyHdrSize {
needRead = length
}
}
}
return &TtyMessage{
Session: binary.BigEndian.Uint64(res[:TtyHdrLenOffset]),
Message: res[TtyHdrSize:],
}, nil
}
// ReadIoMessage returns data coming from the IO channel.
func (h *Hyperstart) ReadIoMessage() (*TtyMessage, error) {
return ReadIoMessageWithConn(h.io)
}
// SendIoMessageWithConn sends data to the specified IO channel.
func SendIoMessageWithConn(conn net.Conn, ttyMsg *TtyMessage) error {
length := len(ttyMsg.Message) + TtyHdrSize
// XXX: Support sending messages by chunks to support messages over
// 10240 bytes. That limit is from hyperstart src/init.c,
// hyper_channel_ops, rbuf_size.
if length > 10240 {
return fmt.Errorf("message too long %d", length)
}
msg := make([]byte, length)
binary.BigEndian.PutUint64(msg[:], ttyMsg.Session)
binary.BigEndian.PutUint32(msg[TtyHdrLenOffset:], uint32(length))
copy(msg[TtyHdrSize:], ttyMsg.Message)
n, err := conn.Write(msg)
if err != nil {
return err
}
if n != length {
return fmt.Errorf("%d bytes written out of %d expected", n, length)
}
return nil
}
// SendIoMessage sends data to the IO channel.
func (h *Hyperstart) SendIoMessage(ttyMsg *TtyMessage) error {
return SendIoMessageWithConn(h.io, ttyMsg)
}
// CodeFromCmd translates a string command to its corresponding code.
func (h *Hyperstart) CodeFromCmd(cmd string) (uint32, error) {
_, ok := CodeList[cmd]
if !ok {
return math.MaxUint32, fmt.Errorf("unknown command '%s'", cmd)
}
return CodeList[cmd], nil
}
// CheckReturnedCode ensures we did not receive an ERROR code.
func (h *Hyperstart) CheckReturnedCode(recvMsg *DecodedMessage, expectedCode uint32) error {
if recvMsg.Code != expectedCode {
if recvMsg.Code == ErrorCode {
return fmt.Errorf("ERROR received from VM agent, control msg received : %s", recvMsg.Message)
}
return fmt.Errorf("CMD ID received %d not matching expected %d, control msg received : %s", recvMsg.Code, expectedCode, recvMsg.Message)
}
return nil
}
// WaitForReady waits for a READY message on CTL channel.
func (h *Hyperstart) WaitForReady() error {
if h.ctlMulticast == nil {
return fmt.Errorf("No multicast available for CTL channel")
}
channel, err := h.ctlMulticast.listen("", "", replyType)
if err != nil {
return err
}
msg := <-channel
err = h.CheckReturnedCode(msg, ReadyCode)
if err != nil {
return err
}
return nil
}
// WaitForPAE waits for a PROCESSASYNCEVENT message on CTL channel.
func (h *Hyperstart) WaitForPAE(containerID, processID string) (*PAECommand, error) {
if h.ctlMulticast == nil {
return nil, fmt.Errorf("No multicast available for CTL channel")
}
channel, err := h.ctlMulticast.listen(containerID, processID, eventType)
if err != nil {
return nil, err
}
msg := <-channel
var paeData PAECommand
err = json.Unmarshal(msg.Message, &paeData)
if err != nil {
return nil, err
}
return &paeData, nil
}
// SendCtlMessage sends a message to the CTL channel.
//
// This function does a full transaction over the CTL channel: it will rely on the
// multicaster to register a listener reading over the CTL channel. Then it writes
// a command and waits for the multicaster to send hyperstart's answer back before
// it can return.
// Several concurrent calls to SendCtlMessage are allowed, the function ensuring
// proper serialization of the communication by making the listener registration
// and the command writing an atomic operation protected by a mutex.
// Waiting for the reply from multicaster doesn't need to be protected by this mutex.
func (h *Hyperstart) SendCtlMessage(cmd string, data []byte) (*DecodedMessage, error) {
if h.ctlMulticast == nil {
return nil, fmt.Errorf("No multicast available for CTL channel")
}
h.ctlMutex.Lock()
channel, err := h.ctlMulticast.listen("", "", replyType)
if err != nil {
h.ctlMutex.Unlock()
return nil, err
}
code, err := h.CodeFromCmd(cmd)
if err != nil {
h.ctlMutex.Unlock()
return nil, err
}
msgSend := &DecodedMessage{
Code: code,
Message: data,
}
err = h.WriteCtlMessage(h.ctl, msgSend)
if err != nil {
h.ctlMutex.Unlock()
return nil, err
}
h.ctlMutex.Unlock()
msgRecv := <-channel
err = h.CheckReturnedCode(msgRecv, AckCode)
if err != nil {
return nil, err
}
return msgRecv, nil
}
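This whole client package is removed by the commit. For reference, here is a minimal usage sketch reconstructed only from the exported functions shown above; the socket paths are placeholders, not real defaults:

```Go
package main

import (
	"log"

	"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
)

func main() {
	// The ctl and io serial sockets are created by the hypervisor;
	// "unix" selects plain unix-domain sockets.
	h := hyperstart.NewHyperstart("/tmp/hyper-ctl.sock", "/tmp/hyper-tty.sock", "unix")

	// OpenSockets dials both channels and starts the ctl multicaster.
	if err := h.OpenSockets(); err != nil {
		log.Fatal(err)
	}
	defer h.CloseSockets()

	// The agent announces itself with a READY message on the ctl channel.
	if err := h.WaitForReady(); err != nil {
		log.Fatal(err)
	}

	// A ping is a full ctl transaction: write the command, wait for the ACK.
	if _, err := h.SendCtlMessage(hyperstart.Ping, nil); err != nil {
		log.Fatal(err)
	}
}
```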

View File

@ -1,572 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package hyperstart_test
import (
"math"
"net"
"reflect"
"testing"
"time"
. "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart/mock"
)
const (
testSockType = "unix"
testSequence = uint64(100)
testMessage = "test_message"
)
func connectHyperstartNoMulticast(h *Hyperstart) error {
return h.OpenSocketsNoMulticast()
}
func connectHyperstart(h *Hyperstart) error {
return h.OpenSockets()
}
func disconnectHyperstart(h *Hyperstart) {
h.CloseSockets()
}
func connectMockHyperstart(t *testing.T, multiCast bool) (*mock.Hyperstart, *Hyperstart, error) {
mockHyper := mock.NewHyperstart(t)
mockHyper.Start()
ctlSock, ioSock := mockHyper.GetSocketPaths()
h := NewHyperstart(ctlSock, ioSock, testSockType)
var err error
if multiCast {
err = connectHyperstart(h)
} else {
err = connectHyperstartNoMulticast(h)
}
if err != nil {
mockHyper.Stop()
return nil, nil, err
}
return mockHyper, h, nil
}
func TestNewHyperstart(t *testing.T) {
ctlSock := "/tmp/test_hyper.sock"
ioSock := "/tmp/test_tty.sock"
sockType := "test_unix"
h := NewHyperstart(ctlSock, ioSock, sockType)
resultCtlSockPath := h.GetCtlSockPath()
resultIoSockPath := h.GetIoSockPath()
resultSockType := h.GetSockType()
if resultCtlSockPath != ctlSock {
t.Fatalf("CTL sock result %s should be the same than %s", resultCtlSockPath, ctlSock)
}
if resultIoSockPath != ioSock {
t.Fatalf("IO sock result %s should be the same than %s", resultIoSockPath, ioSock)
}
if resultSockType != sockType {
t.Fatalf("Sock type result %s should be the same than %s", resultSockType, sockType)
}
}
func TestOpenSockets(t *testing.T) {
mockHyper := mock.NewHyperstart(t)
mockHyper.Start()
ctlSock, ioSock := mockHyper.GetSocketPaths()
h := NewHyperstart(ctlSock, ioSock, testSockType)
err := h.OpenSockets()
if err != nil {
mockHyper.Stop()
t.Fatal()
}
mockHyper.Stop()
disconnectHyperstart(h)
}
func TestCloseSockets(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
mockHyper.Stop()
err = h.CloseSockets()
if err != nil {
t.Fatal()
}
}
func TestSetDeadline(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, false)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
timeoutDuration := 1 * time.Second
err = h.SetDeadline(time.Now().Add(timeoutDuration))
if err != nil {
t.Fatal()
}
mockHyper.SendMessage(ReadyCode, []byte{})
buf := make([]byte, 512)
_, err = h.GetCtlSock().Read(buf)
if err != nil {
t.Fatal()
}
err = h.SetDeadline(time.Now().Add(timeoutDuration))
if err != nil {
t.Fatal()
}
time.Sleep(timeoutDuration)
_, err = h.GetCtlSock().Read(buf)
netErr, ok := err.(net.Error)
if ok && netErr.Timeout() == false {
t.Fatal()
}
}
func TestIsStartedFalse(t *testing.T) {
h := &Hyperstart{}
if h.IsStarted() == true {
t.Fatal()
}
}
func TestIsStartedTrue(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
if h.IsStarted() == false {
t.Fatal()
}
}
func testFormatMessage(t *testing.T, payload interface{}, expected []byte) {
res, err := FormatMessage(payload)
if err != nil {
t.Fatal()
}
if reflect.DeepEqual(res, expected) == false {
t.Fatal()
}
}
func TestFormatMessageFromString(t *testing.T) {
payload := testMessage
expectedOut := []byte(payload)
testFormatMessage(t, payload, expectedOut)
}
type TestStruct struct {
FieldString string `json:"fieldString"`
FieldInt int `json:"fieldInt"`
}
func TestFormatMessageFromStruct(t *testing.T) {
payload := TestStruct{
FieldString: "test_string",
FieldInt: 100,
}
expectedOut := []byte("{\"fieldString\":\"test_string\",\"fieldInt\":100}")
testFormatMessage(t, payload, expectedOut)
}
func TestReadCtlMessage(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, false)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
expected := &DecodedMessage{
Code: ReadyCode,
Message: []byte{},
}
mockHyper.SendMessage(int(expected.Code), expected.Message)
reply, err := ReadCtlMessage(h.GetCtlSock())
if err != nil {
t.Fatal()
}
if reflect.DeepEqual(reply, expected) == false {
t.Fatal()
}
}
func TestWriteCtlMessage(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, false)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
msg := DecodedMessage{
Code: PingCode,
Message: []byte{},
}
err = h.WriteCtlMessage(h.GetCtlSock(), &msg)
if err != nil {
t.Fatal()
}
for {
reply, err := ReadCtlMessage(h.GetCtlSock())
if err != nil {
t.Fatal()
}
if reply.Code == NextCode {
continue
}
err = h.CheckReturnedCode(reply, AckCode)
if err != nil {
t.Fatal()
}
break
}
msgs := mockHyper.GetLastMessages()
if msgs == nil {
t.Fatal()
}
if msgs[0].Code != msg.Code || string(msgs[0].Message) != string(msg.Message) {
t.Fatal()
}
}
func TestReadIoMessage(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
mockHyper.SendIo(testSequence, []byte(testMessage))
msg, err := h.ReadIoMessage()
if err != nil {
t.Fatal()
}
if msg.Session != testSequence || string(msg.Message) != testMessage {
t.Fatal()
}
}
func TestReadIoMessageWithConn(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
mockHyper.SendIo(testSequence, []byte(testMessage))
msg, err := ReadIoMessageWithConn(h.GetIoSock())
if err != nil {
t.Fatal()
}
if msg.Session != testSequence || string(msg.Message) != testMessage {
t.Fatal()
}
}
func TestSendIoMessage(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
msg := &TtyMessage{
Session: testSequence,
Message: []byte(testMessage),
}
err = h.SendIoMessage(msg)
if err != nil {
t.Fatal()
}
buf := make([]byte, 512)
n, seqRecv := mockHyper.ReadIo(buf)
if seqRecv != testSequence || string(buf[TtyHdrSize:n]) != testMessage {
t.Fatal()
}
}
func TestSendIoMessageWithConn(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
msg := &TtyMessage{
Session: testSequence,
Message: []byte(testMessage),
}
err = SendIoMessageWithConn(h.GetIoSock(), msg)
if err != nil {
t.Fatal()
}
buf := make([]byte, 512)
n, seqRecv := mockHyper.ReadIo(buf)
if seqRecv != testSequence || string(buf[TtyHdrSize:n]) != testMessage {
t.Fatal()
}
}
func testCodeFromCmd(t *testing.T, cmd string, expected uint32) {
h := &Hyperstart{}
code, err := h.CodeFromCmd(cmd)
if err != nil || code != expected {
t.Fatal()
}
}
func TestCodeFromCmdVersion(t *testing.T) {
testCodeFromCmd(t, Version, VersionCode)
}
func TestCodeFromCmdStartSandbox(t *testing.T) {
testCodeFromCmd(t, StartSandbox, StartSandboxCode)
}
func TestCodeFromCmdDestroySandbox(t *testing.T) {
testCodeFromCmd(t, DestroySandbox, DestroySandboxCode)
}
func TestCodeFromCmdExecCmd(t *testing.T) {
testCodeFromCmd(t, ExecCmd, ExecCmdCode)
}
func TestCodeFromCmdReady(t *testing.T) {
testCodeFromCmd(t, Ready, ReadyCode)
}
func TestCodeFromCmdAck(t *testing.T) {
testCodeFromCmd(t, Ack, AckCode)
}
func TestCodeFromCmdError(t *testing.T) {
testCodeFromCmd(t, Error, ErrorCode)
}
func TestCodeFromCmdWinSize(t *testing.T) {
testCodeFromCmd(t, WinSize, WinsizeCode)
}
func TestCodeFromCmdPing(t *testing.T) {
testCodeFromCmd(t, Ping, PingCode)
}
func TestCodeFromCmdNext(t *testing.T) {
testCodeFromCmd(t, Next, NextCode)
}
func TestCodeFromCmdWriteFile(t *testing.T) {
testCodeFromCmd(t, WriteFile, WriteFileCode)
}
func TestCodeFromCmdReadFile(t *testing.T) {
testCodeFromCmd(t, ReadFile, ReadFileCode)
}
func TestCodeFromCmdNewContainer(t *testing.T) {
testCodeFromCmd(t, NewContainer, NewContainerCode)
}
func TestCodeFromCmdKillContainer(t *testing.T) {
testCodeFromCmd(t, KillContainer, KillContainerCode)
}
func TestCodeFromCmdOnlineCPUMem(t *testing.T) {
testCodeFromCmd(t, OnlineCPUMem, OnlineCPUMemCode)
}
func TestCodeFromCmdSetupInterface(t *testing.T) {
testCodeFromCmd(t, SetupInterface, SetupInterfaceCode)
}
func TestCodeFromCmdSetupRoute(t *testing.T) {
testCodeFromCmd(t, SetupRoute, SetupRouteCode)
}
func TestCodeFromCmdRemoveContainer(t *testing.T) {
testCodeFromCmd(t, RemoveContainer, RemoveContainerCode)
}
func TestCodeFromCmdUnknown(t *testing.T) {
h := &Hyperstart{}
code, err := h.CodeFromCmd("unknown")
if err == nil || code != math.MaxUint32 {
t.Fatal()
}
}
func testCheckReturnedCode(t *testing.T, recvMsg *DecodedMessage, refCode uint32) {
h := &Hyperstart{}
err := h.CheckReturnedCode(recvMsg, refCode)
if err != nil {
t.Fatal()
}
}
func TestCheckReturnedCodeList(t *testing.T) {
for _, code := range CodeList {
recvMsg := DecodedMessage{Code: code}
testCheckReturnedCode(t, &recvMsg, code)
}
}
func testCheckReturnedCodeFailure(t *testing.T, recvMsg *DecodedMessage, refCode uint32) {
h := &Hyperstart{}
err := h.CheckReturnedCode(recvMsg, refCode)
if err == nil {
t.Fatal()
}
}
func TestCheckReturnedCodeListWrong(t *testing.T) {
for _, code := range CodeList {
msg := DecodedMessage{Code: code}
if code != ReadyCode {
testCheckReturnedCodeFailure(t, &msg, ReadyCode)
} else {
testCheckReturnedCodeFailure(t, &msg, PingCode)
}
}
}
func TestWaitForReady(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
mockHyper.SendMessage(int(ReadyCode), []byte{})
err = h.WaitForReady()
if err != nil {
t.Fatal()
}
}
func TestWaitForReadyError(t *testing.T) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
mockHyper.SendMessage(int(ErrorCode), []byte{})
err = h.WaitForReady()
if err == nil {
t.Fatal()
}
}
var cmdList = []string{
Version,
StartSandbox,
DestroySandbox,
ExecCmd,
Ready,
Ack,
Error,
WinSize,
Ping,
Next,
NewContainer,
KillContainer,
OnlineCPUMem,
SetupInterface,
SetupRoute,
RemoveContainer,
}
func testSendCtlMessage(t *testing.T, cmd string) {
mockHyper, h, err := connectMockHyperstart(t, true)
if err != nil {
t.Fatal()
}
defer disconnectHyperstart(h)
defer mockHyper.Stop()
msg, err := h.SendCtlMessage(cmd, []byte{})
if err != nil {
t.Fatal()
}
if msg.Code != AckCode {
t.Fatal()
}
}
func TestSendCtlMessage(t *testing.T) {
for _, cmd := range cmdList {
testSendCtlMessage(t, cmd)
}
}

View File

@ -1,355 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package mock
import (
"encoding/binary"
"encoding/hex"
"fmt"
"net"
"os"
"path/filepath"
"sync"
"testing"
hyper "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
"github.com/stretchr/testify/assert"
)
// Control command string IDs
const (
Version = "version"
StartSandbox = "startsandbox"
DestroySandbox = "destroysandbox"
ExecCmd = "execcmd"
Ready = "ready"
Ack = "ack"
Error = "error"
WinSize = "winsize"
Ping = "ping"
FinishSandbox = "finishsandbox"
Next = "next"
WriteFile = "writefile"
ReadFile = "readfile"
NewContainer = "newcontainer"
KillContainer = "killcontainer"
RemoveContainer = "removecontainer"
OnlineCPUMem = "onlinecpumem"
SetupInterface = "setupinterface"
SetupRoute = "setuproute"
)
// Hyperstart is an object mocking the hyperstart agent.
type Hyperstart struct {
t *testing.T
ctlSocketPath, ioSocketPath string
ctlListener, ioListener *net.UnixListener
ctl, io net.Conn
// Start() will launch two goroutines to accept connections on the ctl
// and io sockets. Those goroutine will exit once the first connection
// is accepted or when the listening socket is closed. wgConnected can
// be used to make sure we've accepted connections to both sockets
wgConnected sync.WaitGroup
// We then have two other goroutines to handle communication on those
// sockets.
wg sync.WaitGroup
// Keep the list of messages received by hyperstart, older first, for
// later inspection with GetLastMessages()
lastMessages []hyper.DecodedMessage
}
func newMessageList() []hyper.DecodedMessage {
return make([]hyper.DecodedMessage, 0, 10)
}
// NewHyperstart creates a new hyperstart instance.
func NewHyperstart(t *testing.T) *Hyperstart {
dir := os.TempDir()
ctlSocketPath := filepath.Join(dir, "mock.hyper."+nextSuffix()+".0.sock")
ioSocketPath := filepath.Join(dir, "mock.hyper."+nextSuffix()+".1.sock")
return &Hyperstart{
t: t,
ctlSocketPath: ctlSocketPath,
ioSocketPath: ioSocketPath,
lastMessages: newMessageList(),
}
}
// GetSocketPaths returns the ctl and io socket paths, respectively
func (h *Hyperstart) GetSocketPaths() (string, string) {
return h.ctlSocketPath, h.ioSocketPath
}
// GetLastMessages returns list of messages received by hyperstart, older
// first. This function only returns the messages:
// - since Start on the first invocation
// - since the last GetLastMessages for subsequent invocations
func (h *Hyperstart) GetLastMessages() []hyper.DecodedMessage {
msgs := h.lastMessages
h.lastMessages = newMessageList()
return msgs
}
func (h *Hyperstart) logf(format string, args ...interface{}) {
h.t.Logf("[hyperstart] "+format, args...)
}
func (h *Hyperstart) logData(data []byte) {
h.t.Log(hex.Dump(data))
}
//
// ctl channel
//
const ctlHeaderSize = 8
func (h *Hyperstart) writeCtl(data []byte) error {
h.wgConnected.Wait()
n, err := h.ctl.Write(data)
if err != nil {
return fmt.Errorf("Connection broken, cannot send data")
}
assert.Equal(h.t, n, len(data))
return nil
}
// SendMessage makes hyperstart send the hyper command cmd along with optional
// data on the control channel
func (h *Hyperstart) SendMessage(cmd int, data []byte) {
length := ctlHeaderSize + len(data)
header := make([]byte, ctlHeaderSize)
binary.BigEndian.PutUint32(header[:], uint32(cmd))
binary.BigEndian.PutUint32(header[4:], uint32(length))
err := h.writeCtl(header)
if err != nil {
return
}
if len(data) == 0 {
return
}
h.writeCtl(data)
}
func (h *Hyperstart) readCtl(data []byte) error {
h.wgConnected.Wait()
n, err := h.ctl.Read(data)
if err != nil {
return err
}
assert.Equal(h.t, n, len(data))
return nil
}
func (h *Hyperstart) ackData(nBytes int) {
data := make([]byte, 4)
binary.BigEndian.PutUint32(data[:], uint32(nBytes))
h.SendMessage(hyper.NextCode, data)
}
func (h *Hyperstart) readMessage() (int, []byte, error) {
buf := make([]byte, ctlHeaderSize)
if err := h.readCtl(buf); err != nil {
return -1, buf, err
}
h.ackData(len(buf))
cmd := int(binary.BigEndian.Uint32(buf[:4]))
length := int(binary.BigEndian.Uint32(buf[4:8]))
assert.True(h.t, length >= 8)
length -= 8
if length == 0 {
return cmd, nil, nil
}
data := make([]byte, length)
if err := h.readCtl(data); err != nil {
return -1, buf, err
}
h.ackData(len(data))
return cmd, data, nil
}
func (h *Hyperstart) handleCtl() {
for {
cmd, data, err := h.readMessage()
if err != nil {
break
}
if len(data) != 0 {
h.logData(data)
}
h.lastMessages = append(h.lastMessages, hyper.DecodedMessage{
Code: uint32(cmd),
Message: data,
})
// answer back with the message exit status
// XXX: may be interesting to be able to configure the mock
// hyperstart to fail and test the reaction of proxy/clients
h.SendMessage(hyper.AckCode, nil)
}
h.wg.Done()
}
//
// io channel
//
const ioHeaderSize = 12
func (h *Hyperstart) writeIo(data []byte) {
h.wgConnected.Wait()
n, err := h.io.Write(data)
assert.Nil(h.t, err)
assert.Equal(h.t, n, len(data))
}
// SendIo sends a packet of I/O data to a client connected to the I/O channel.
// Multiple I/O streams are multiplexed on that channel. seq specifies which
// stream the data belongs to.
func (h *Hyperstart) SendIo(seq uint64, data []byte) {
length := ioHeaderSize + len(data)
header := make([]byte, ioHeaderSize)
binary.BigEndian.PutUint64(header[:], seq)
binary.BigEndian.PutUint32(header[8:], uint32(length))
h.writeIo(header)
if len(data) == 0 {
return
}
h.writeIo(data)
}
// SendIoString sends a string to a client connected to the I/O channel.
// Multiple I/O streams are multiplexed on that channel. seq specifies which
// stream the data belongs to.
func (h *Hyperstart) SendIoString(seq uint64, data string) {
h.SendIo(seq, []byte(data))
}
// CloseIo closes the I/O stream specified by seq.
func (h *Hyperstart) CloseIo(seq uint64) {
h.SendIo(seq, nil)
}
// SendExitStatus sends the exit status on the I/O stream specified by seq.
// The exit status should only be sent after the stream has been closed with
// CloseIo.
func (h *Hyperstart) SendExitStatus(seq uint64, exitStatus uint8) {
status := []byte{exitStatus}
h.SendIo(seq, status)
}
// ReadIo reads data that has been sent on the I/O channel by a client. It
// returns the full packet (header & data) as well as the seq number decoded
// from the header.
func (h *Hyperstart) ReadIo(buf []byte) (n int, seq uint64) {
h.wgConnected.Wait()
n, err := h.io.Read(buf)
assert.Nil(h.t, err)
seq = binary.BigEndian.Uint64(buf[:8])
return
}
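// The following helper is not part of the original file: it is a hypothetical
// reference sketch of the io framing the mock implements above, assuming the
// 12-byte big-endian header (stream sequence number, total length) used by
// SendIo and ReadIo.
func encodeIoMessage(seq uint64, payload []byte) []byte {
    // The length field counts the 12-byte header plus the payload.
    buf := make([]byte, ioHeaderSize+len(payload))
    binary.BigEndian.PutUint64(buf[:8], seq)
    binary.BigEndian.PutUint32(buf[8:12], uint32(len(buf)))
    copy(buf[ioHeaderSize:], payload)
    return buf
}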
type acceptCb func(c net.Conn)
func (h *Hyperstart) startListening(path string, cb acceptCb) *net.UnixListener {
addr := &net.UnixAddr{Name: path, Net: "unix"}
l, err := net.ListenUnix("unix", addr)
assert.Nil(h.t, err)
go func() {
c, err := l.Accept()
if err != nil {
h.logf("%s: Connection failed %s\n", path, err)
cb(nil)
return
}
cb(c)
}()
return l
}
// Start starts the mock hyperstart agent: it creates the ctl and io unix
// sockets and accepts client connections on them.
// Once finished with the Hyperstart object, Stop must be called.
func (h *Hyperstart) Start() {
h.wgConnected.Add(1)
h.wgConnected.Add(1)
h.ctlListener = h.startListening(h.ctlSocketPath, func(s net.Conn) {
// a client is now connected to the ctl socket
h.ctl = s
// Stop() was called before we had a chance to accept a
// connection.
if s == nil {
h.wgConnected.Done()
return
}
// start the goroutine that will handle the ctl socket
h.wg.Add(1)
go h.handleCtl()
// we need to signal wgConnected late, so that wg.Add(1) is done before
// the wg.Wait() in Stop()
// See https://golang.org/pkg/sync/#WaitGroup.Wait:
// "Note that calls with a positive delta that occur when the
// counter is zero must happen before a Wait"
h.wgConnected.Done()
})
h.ioListener = h.startListening(h.ioSocketPath, func(s net.Conn) {
// a client is now connected to the io socket
h.io = s
h.wgConnected.Done()
})
}
// Stop closes all internal resources and waits for goroutines started by Start
// to finish. Stop shouldn't be called if Start hasn't been called.
func (h *Hyperstart) Stop() {
h.wgConnected.Wait()
h.ctl.Close()
h.io.Close()
h.ctlListener.Close()
h.ioListener.Close()
h.wg.Wait()
os.Remove(h.ctlSocketPath)
os.Remove(h.ioSocketPath)
}
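// Hypothetical usage sketch (not part of the original file): how a test might
// drive this mock, assuming the code under test connects to both sockets
// before Stop is called (Stop waits for both connections).
func testMockLifecycle(t *testing.T) {
    h := NewHyperstart(t)
    h.Start()
    defer h.Stop()

    ctlPath, ioPath := h.GetSocketPaths()
    // ctlPath and ioPath would be handed to the hyperstart client under test,
    // which connects to them and sends its commands.
    _ = ctlPath
    _ = ioPath

    // Once the client has run, the commands it sent can be inspected:
    // msgs := h.GetLastMessages()
}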

View File

@ -1,28 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package mock
import (
"fmt"
"os"
"path/filepath"
)
// GetTmpPath returns a filename suitable for a temporary file, according to
// the format string given as argument. The format string must contain a single
// %s, which will be replaced by a random string. E.g.:
//
// GetTmpPath("test.foo.%s.sock")
//
// will return something like:
//
// "/tmp/test.foo.832222621.sock"
func GetTmpPath(format string) string {
filename := fmt.Sprintf(format, nextSuffix())
dir := os.TempDir()
return filepath.Join(dir, filename)
}

View File

@ -1,33 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package mock
import (
"os"
"strconv"
"sync"
"time"
)
// Taken from https://golang.org/src/io/ioutil/tempfile.go?s=#L19
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

View File

@ -1,164 +0,0 @@
// Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package hyperstart
import (
"encoding/json"
"fmt"
"net"
"sync"
)
type ctlDataType string
const (
eventType ctlDataType = "ctlEvent"
replyType ctlDataType = "ctlReply"
)
type multicast struct {
bufReplies []*DecodedMessage
reply []chan *DecodedMessage
event map[string]chan *DecodedMessage
ctl net.Conn
sync.Mutex
}
func newMulticast(ctlConn net.Conn) *multicast {
return &multicast{
bufReplies: []*DecodedMessage{},
reply: []chan *DecodedMessage{},
event: make(map[string]chan *DecodedMessage),
ctl: ctlConn,
}
}
func startCtlMonitor(ctlConn net.Conn, done chan<- interface{}) *multicast {
ctlMulticast := newMulticast(ctlConn)
go func() {
for {
msg, err := ReadCtlMessage(ctlMulticast.ctl)
if err != nil {
hyperLog.Infof("Read on CTL channel ended: %s", err)
break
}
err = ctlMulticast.write(msg)
if err != nil {
hyperLog.Errorf("Multicaster write error: %s", err)
break
}
}
close(done)
}()
return ctlMulticast
}
func (m *multicast) buildEventID(containerID, processID string) string {
return fmt.Sprintf("%s-%s", containerID, processID)
}
func (m *multicast) sendEvent(msg *DecodedMessage) error {
var paeData PAECommand
err := json.Unmarshal(msg.Message, &paeData)
if err != nil {
return err
}
uniqueID := m.buildEventID(paeData.Container, paeData.Process)
channel, exist := m.event[uniqueID]
if !exist {
return nil
}
channel <- msg
delete(m.event, uniqueID)
return nil
}
func (m *multicast) sendReply(msg *DecodedMessage) error {
m.Lock()
if len(m.reply) == 0 {
m.bufReplies = append(m.bufReplies, msg)
m.Unlock()
return nil
}
replyChannel := m.reply[0]
m.reply = m.reply[1:]
m.Unlock()
// The current reply channel has been removed from the list, so we can send
// on it outside the mutex: no other goroutine can write to this channel.
replyChannel <- msg
return nil
}
func (m *multicast) processBufferedReply(channel chan *DecodedMessage) {
m.Lock()
if len(m.bufReplies) == 0 {
m.reply = append(m.reply, channel)
m.Unlock()
return
}
msg := m.bufReplies[0]
m.bufReplies = m.bufReplies[1:]
m.Unlock()
// The current buffered reply message has been removed from the list, and the
// channel has not been added to the reply list, so we can send the buffered
// message on that channel outside the mutex: no other goroutine can write
// this message to another channel, or another message to this channel.
channel <- msg
}
func (m *multicast) write(msg *DecodedMessage) error {
switch msg.Code {
case NextCode:
return nil
case ProcessAsyncEventCode:
return m.sendEvent(msg)
default:
return m.sendReply(msg)
}
}
func (m *multicast) listen(containerID, processID string, dataType ctlDataType) (chan *DecodedMessage, error) {
switch dataType {
case replyType:
newChan := make(chan *DecodedMessage)
go m.processBufferedReply(newChan)
return newChan, nil
case eventType:
uniqueID := m.buildEventID(containerID, processID)
_, exist := m.event[uniqueID]
if exist {
return nil, fmt.Errorf("Channel already assigned for ID %s", uniqueID)
}
m.event[uniqueID] = make(chan *DecodedMessage)
return m.event[uniqueID], nil
default:
return nil, fmt.Errorf("Unknown data type: %s", dataType)
}
}
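// Hypothetical consumer sketch (not part of the original file): start the ctl
// monitor on an established connection, register interest in the next reply,
// then wait for hyperstart's answer on the returned channel. A command would
// be written on the ctl connection between listen and the receive.
func waitForNextReply(ctl net.Conn) (*DecodedMessage, error) {
    done := make(chan interface{})
    m := startCtlMonitor(ctl, done)

    replyCh, err := m.listen("", "", replyType)
    if err != nil {
        return nil, err
    }

    // ... send a command on ctl here ...

    return <-replyCh, nil
}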

View File

@ -1,260 +0,0 @@
// Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package hyperstart
import (
"syscall"
)
// Defines all available commands to communicate with the hyperstart agent.
const (
VersionCode = iota
StartSandboxCode
GetSandboxDeprecatedCode
StopSandboxDeprecatedCode
DestroySandboxCode
RestartContainerDeprecatedCode
ExecCmdCode
FinishCmdDeprecatedCode
ReadyCode
AckCode
ErrorCode
WinsizeCode
PingCode
FinishSandboxDeprecatedCode
NextCode
WriteFileCode
ReadFileCode
NewContainerCode
KillContainerCode
OnlineCPUMemCode
SetupInterfaceCode
SetupRouteCode
RemoveContainerCode
PsContainerCode
ProcessAsyncEventCode
)
// FileCommand is the structure corresponding to the format expected by
// hyperstart to interact with files.
type FileCommand struct {
Container string `json:"container"`
File string `json:"file"`
}
// KillCommand is the structure corresponding to the format expected by
// hyperstart to kill a container on the guest.
type KillCommand struct {
Container string `json:"container"`
Signal syscall.Signal `json:"signal"`
AllProcesses bool `json:"allProcesses"`
}
// ExecCommand is the structure corresponding to the format expected by
// hyperstart to execute a command on the guest.
type ExecCommand struct {
Container string `json:"container,omitempty"`
Process Process `json:"process"`
}
// RemoveCommand is the structure corresponding to the format expected by
// hyperstart to remove a container on the guest.
type RemoveCommand struct {
Container string `json:"container"`
}
// PsCommand is the structure corresponding to the format expected by
// hyperstart to list processes of a container on the guest.
type PsCommand struct {
Container string `json:"container"`
Format string `json:"format"`
PsArgs []string `json:"psargs"`
}
// PAECommand is the structure a client can expect to receive from hyperstart
// after a process has been started/executed on a container.
type PAECommand struct {
Container string `json:"container"`
Process string `json:"process"`
Event string `json:"event"`
Info string `json:"info,omitempty"`
Status int `json:"status,omitempty"`
}
// DecodedMessage is the structure holding messages coming from CTL channel.
type DecodedMessage struct {
Code uint32
Message []byte
}
// TtyMessage is the structure holding messages coming from TTY channel.
type TtyMessage struct {
Session uint64
Message []byte
}
// WindowSizeMessage is the structure corresponding to the format expected by
// hyperstart to resize a container's window.
type WindowSizeMessage struct {
Container string `json:"container"`
Process string `json:"process"`
Row uint16 `json:"row"`
Column uint16 `json:"column"`
}
// VolumeDescriptor describes a volume related to a container.
type VolumeDescriptor struct {
Device string `json:"device"`
Addr string `json:"addr,omitempty"`
Mount string `json:"mount"`
Fstype string `json:"fstype,omitempty"`
ReadOnly bool `json:"readOnly"`
DockerVolume bool `json:"dockerVolume"`
}
// FsmapDescriptor describes a filesystem map related to a container.
type FsmapDescriptor struct {
Source string `json:"source"`
Path string `json:"path"`
ReadOnly bool `json:"readOnly"`
DockerVolume bool `json:"dockerVolume"`
AbsolutePath bool `json:"absolutePath"`
SCSIAddr string `json:"scsiAddr"`
}
// EnvironmentVar holds an environment variable and its value.
type EnvironmentVar struct {
Env string `json:"env"`
Value string `json:"value"`
}
// Rlimit describes a resource limit.
type Rlimit struct {
// Type of the rlimit to set
Type string `json:"type"`
// Hard is the hard limit for the specified type
Hard uint64 `json:"hard"`
// Soft is the soft limit for the specified type
Soft uint64 `json:"soft"`
}
// Capabilities specify the capabilities to keep when executing the process inside the container.
type Capabilities struct {
// Bounding is the set of capabilities checked by the kernel.
Bounding []string `json:"bounding"`
// Effective is the set of capabilities checked by the kernel.
Effective []string `json:"effective"`
// Inheritable is the capabilities preserved across execve.
Inheritable []string `json:"inheritable"`
// Permitted is the limiting superset for effective capabilities.
Permitted []string `json:"permitted"`
// Ambient is the ambient set of capabilities that are kept.
Ambient []string `json:"ambient"`
}
// Process describes a process running on a container inside a sandbox.
type Process struct {
// Args specifies the binary and arguments for the application to execute.
Args []string `json:"args"`
// Rlimits specifies rlimit options to apply to the process.
Rlimits []Rlimit `json:"rlimits,omitempty"`
// Envs populates the process environment for the process.
Envs []EnvironmentVar `json:"envs,omitempty"`
AdditionalGroups []string `json:"additionalGroups,omitempty"`
// Workdir is the current working directory for the process and must be
// relative to the container's root.
Workdir string `json:"workdir"`
User string `json:"user,omitempty"`
Group string `json:"group,omitempty"`
// Sequence number for stdin and stdout
Stdio uint64 `json:"stdio,omitempty"`
// Sequence number for stderr if it is not shared with stdout
Stderr uint64 `json:"stderr,omitempty"`
// Capabilities specifies the sets of capabilities for the process(es) inside the container.
Capabilities Capabilities `json:"capabilities"`
// NoNewPrivileges indicates that the process should not gain any additional privileges
NoNewPrivileges bool `json:"noNewPrivileges"`
// Terminal creates an interactive terminal for the process.
Terminal bool `json:"terminal"`
}
// SystemMountsInfo describes additional information for system mounts that the agent
// needs to handle
type SystemMountsInfo struct {
// Indicates if /dev has been passed as a bind mount for the host /dev
BindMountDev bool `json:"bindMountDev"`
// Size of /dev/shm assigned on the host.
DevShmSize int `json:"devShmSize"`
}
// Constraints describes the constraints for a container
type Constraints struct {
// CPUQuota specifies the total amount of CPU time, in microseconds per
// CPUPeriod, that the container is guaranteed CPU access
CPUQuota int64
// CPUPeriod specifies the CPU CFS scheduler period of time in microseconds
CPUPeriod uint64
// CPUShares specifies container's weight vs. other containers
CPUShares uint64
}
// Container describes a container running on a sandbox.
type Container struct {
ID string `json:"id"`
Rootfs string `json:"rootfs"`
Fstype string `json:"fstype,omitempty"`
Image string `json:"image"`
SCSIAddr string `json:"scsiAddr,omitempty"`
Volumes []*VolumeDescriptor `json:"volumes,omitempty"`
Fsmap []*FsmapDescriptor `json:"fsmap,omitempty"`
Sysctl map[string]string `json:"sysctl,omitempty"`
Process *Process `json:"process"`
RestartPolicy string `json:"restartPolicy"`
Initialize bool `json:"initialize"`
SystemMountsInfo SystemMountsInfo `json:"systemMountsInfo"`
Constraints Constraints `json:"constraints"`
}
// IPAddress describes an IP address and its network mask.
type IPAddress struct {
IPAddress string `json:"ipAddress"`
NetMask string `json:"netMask"`
}
// NetworkIface describes a network interface to setup on the host.
type NetworkIface struct {
Device string `json:"device,omitempty"`
NewDevice string `json:"newDeviceName,omitempty"`
IPAddresses []IPAddress `json:"ipAddresses"`
MTU int `json:"mtu"`
MACAddr string `json:"macAddr"`
}
// Route describes a route to setup on the host.
type Route struct {
Dest string `json:"dest"`
Gateway string `json:"gateway,omitempty"`
Device string `json:"device,omitempty"`
}
// Sandbox describes the sandbox configuration to start inside the VM.
type Sandbox struct {
Hostname string `json:"hostname"`
Containers []Container `json:"containers,omitempty"`
Interfaces []NetworkIface `json:"interfaces,omitempty"`
DNS []string `json:"dns,omitempty"`
Routes []Route `json:"routes,omitempty"`
ShareDir string `json:"shareDir"`
}
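// Hypothetical sketch (not part of the original file, and assuming an
// encoding/json import): these types map onto the wire by JSON-encoding the
// command structure and sending it on the ctl channel with the matching
// command code, here ExecCmdCode.
func buildExecPayload(containerID string, args []string) ([]byte, error) {
    cmd := ExecCommand{
        Container: containerID,
        Process: Process{
            Args:    args,
            Workdir: "/",
        },
    }
    return json.Marshal(&cmd)
}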

View File

@ -48,10 +48,8 @@ var testHyperstartTtySocket = ""
func cleanUp() {
globalSandboxList.removeSandbox(testSandboxID)
store.DeleteAll()
for _, dir := range []string{testDir, defaultSharedDir} {
os.RemoveAll(dir)
os.MkdirAll(dir, store.DirMode)
}
os.RemoveAll(testDir)
os.MkdirAll(testDir, store.DirMode)
setup()
}

View File

@ -62,15 +62,12 @@ func (c *VMConfig) ToGrpc() (*pb.GrpcVMConfig, error) {
return nil, err
}
var agentConfig []byte
switch aconf := c.AgentConfig.(type) {
case HyperConfig:
agentConfig, err = json.Marshal(&aconf)
case KataAgentConfig:
agentConfig, err = json.Marshal(&aconf)
default:
err = fmt.Errorf("agent type %s is not supported by VM cache", c.AgentType)
aconf, ok := c.AgentConfig.(KataAgentConfig)
if !ok {
return nil, fmt.Errorf("agent type is not supported by VM cache")
}
agentConfig, err := json.Marshal(&aconf)
if err != nil {
return nil, err
}
@ -89,25 +86,15 @@ func GrpcToVMConfig(j *pb.GrpcVMConfig) (*VMConfig, error) {
return nil, err
}
switch config.AgentType {
case HyperstartAgent:
var hyperConfig HyperConfig
err := json.Unmarshal(j.AgentConfig, &hyperConfig)
if err == nil {
config.AgentConfig = hyperConfig
if config.AgentType != KataContainersAgent {
return nil, fmt.Errorf("agent type %s is not supported by VM cache", config.AgentType)
}
case KataContainersAgent:
var kataConfig KataAgentConfig
err := json.Unmarshal(j.AgentConfig, &kataConfig)
err = json.Unmarshal(j.AgentConfig, &kataConfig)
if err == nil {
config.AgentConfig = kataConfig
}
default:
err = fmt.Errorf("agent type %s is not supported by VM cache", config.AgentType)
}
if err != nil {
return nil, err
}
return &config, nil
}
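// Hypothetical round-trip sketch of the new kata-only behaviour: a VMConfig
// carrying a KataAgentConfig serializes through ToGrpc and deserializes back
// with GrpcToVMConfig; any other agent configuration type is rejected in both
// directions.
func roundTripVMConfig(c VMConfig) (*VMConfig, error) {
    grpcConfig, err := c.ToGrpc()
    if err != nil {
        return nil, err
    }
    return GrpcToVMConfig(grpcConfig)
}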