vc: remove virtc api cli

virtc, previously used as a small API client for virtcontainers, is
no longer needed.

Fixes #1419

Signed-off-by: Gabi Beyer <gabrielle.n.beyer@intel.com>
This commit is contained in:
Gabi Beyer 2019-03-25 15:15:38 -07:00 committed by Gabi Beyer
parent d4ef9c05d7
commit c658770891
3 changed files with 1 additions and 1155 deletions

View File

@ -8,8 +8,6 @@ PREFIX := /usr
BIN_DIR := $(PREFIX)/bin BIN_DIR := $(PREFIX)/bin
VC_BIN_DIR := $(BIN_DIR)/virtcontainers/bin VC_BIN_DIR := $(BIN_DIR)/virtcontainers/bin
TEST_BIN_DIR := $(VC_BIN_DIR)/test TEST_BIN_DIR := $(VC_BIN_DIR)/test
VIRTC_DIR := hack/virtc
VIRTC_BIN := virtc
HOOK_DIR := hook/mock HOOK_DIR := hook/mock
HOOK_BIN := hook HOOK_BIN := hook
CC_SHIM_DIR := shim/mock/cc-shim CC_SHIM_DIR := shim/mock/cc-shim
@ -35,9 +33,6 @@ all: build binaries
build: build:
$(QUIET_GOBUILD)go build $(go list ./... | grep -v /vendor/) $(QUIET_GOBUILD)go build $(go list ./... | grep -v /vendor/)
virtc:
$(QUIET_GOBUILD)go build -o $(VIRTC_DIR)/$@ $(VIRTC_DIR)/*.go
hook: hook:
$(QUIET_GOBUILD)go build -o $(HOOK_DIR)/$@ $(HOOK_DIR)/*.go $(QUIET_GOBUILD)go build -o $(HOOK_DIR)/$@ $(HOOK_DIR)/*.go
@ -47,7 +42,7 @@ cc-shim:
kata-shim: kata-shim:
$(QUIET_GOBUILD)go build -o $(KATA_SHIM_DIR)/$@ $(KATA_SHIM_DIR)/*.go $(QUIET_GOBUILD)go build -o $(KATA_SHIM_DIR)/$@ $(KATA_SHIM_DIR)/*.go
binaries: virtc hook cc-shim kata-shim binaries: hook cc-shim kata-shim
# #
# Tests # Tests
@ -78,7 +73,6 @@ endef
install: install:
@mkdir -p $(VC_BIN_DIR) @mkdir -p $(VC_BIN_DIR)
$(call INSTALL_EXEC,$(VIRTC_DIR)/$(VIRTC_BIN))
@mkdir -p $(TEST_BIN_DIR) @mkdir -p $(TEST_BIN_DIR)
$(call INSTALL_TEST_EXEC,$(HOOK_DIR)/$(HOOK_BIN)) $(call INSTALL_TEST_EXEC,$(HOOK_DIR)/$(HOOK_BIN))
$(call INSTALL_TEST_EXEC,$(CC_SHIM_DIR)/$(CC_SHIM_BIN)) $(call INSTALL_TEST_EXEC,$(CC_SHIM_DIR)/$(CC_SHIM_BIN))
@ -97,7 +91,6 @@ define UNINSTALL_TEST_EXEC
endef endef
uninstall: uninstall:
$(call UNINSTALL_EXEC,$(VIRTC_BIN))
$(call UNINSTALL_TEST_EXEC,$(HOOK_BIN)) $(call UNINSTALL_TEST_EXEC,$(HOOK_BIN))
$(call UNINSTALL_TEST_EXEC,$(CC_SHIM_BIN)) $(call UNINSTALL_TEST_EXEC,$(CC_SHIM_BIN))
$(call UNINSTALL_TEST_EXEC,$(KATA_SHIM_BIN)) $(call UNINSTALL_TEST_EXEC,$(KATA_SHIM_BIN))
@ -112,7 +105,6 @@ define FILE_SAFE_TO_REMOVE =
$(shell test -e "$(1)" && test "$(1)" != "/" && echo "$(1)") $(shell test -e "$(1)" && test "$(1)" != "/" && echo "$(1)")
endef endef
CLEAN_FILES += $(VIRTC_DIR)/$(VIRTC_BIN)
CLEAN_FILES += $(HOOK_DIR)/$(HOOK_BIN) CLEAN_FILES += $(HOOK_DIR)/$(HOOK_BIN)
CLEAN_FILES += $(SHIM_DIR)/$(CC_SHIM_BIN) CLEAN_FILES += $(SHIM_DIR)/$(CC_SHIM_BIN)
CLEAN_FILES += $(SHIM_DIR)/$(KATA_SHIM_BIN) CLEAN_FILES += $(SHIM_DIR)/$(KATA_SHIM_BIN)
@ -123,7 +115,6 @@ clean:
.PHONY: \ .PHONY: \
all \ all \
build \ build \
virtc \
hook \ hook \
shim \ shim \
binaries \ binaries \

View File

@ -1,240 +0,0 @@
# virtc
`virtc` is a simple command-line tool that serves to demonstrate typical usage of the virtcontainers API.
This is example software; unlike other projects like runc, runv, or rkt, virtcontainers is not a full container runtime.
## Virtc example
Here we explain how to use the sandbox and container API from `virtc` command line.
### Prepare your environment
#### Get your kernel
_Fedora_
```
$ sudo -E dnf config-manager --add-repo http://download.opensuse.org/repositories/home:clearlinux:preview:clear-containers-2.1/Fedora_25/home:clearlinux:preview:clear-containers-2.1.repo
$ sudo dnf install linux-container
```
_Ubuntu_
```
$ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/clearlinux:/preview:/clear-containers-2.1/xUbuntu_16.10/ /' >> /etc/apt/sources.list.d/cc-oci-runtime.list"
$ sudo apt install linux-container
```
#### Get your image
Retrieve a recent Clear Containers image to make sure it contains a recent version of hyperstart agent.
To download and install the latest image:
```
$ latest_version=$(curl -sL https://download.clearlinux.org/latest)
$ curl -LO "https://download.clearlinux.org/current/clear-${latest_version}-containers.img.xz"
$ unxz clear-${latest_version}-containers.img.xz
$ sudo mkdir -p /usr/share/clear-containers/
$ sudo install --owner root --group root --mode 0644 clear-${latest_version}-containers.img /usr/share/clear-containers/
$ sudo ln -fs /usr/share/clear-containers/clear-${latest_version}-containers.img /usr/share/clear-containers/clear-containers.img
```
#### Get virtc
_Download virtcontainers project_
```
$ go get github.com/kata-containers/runtime/virtcontainers
```
_Build and setup your environment_
```
$ cd $GOPATH/src/github.com/kata-containers/runtime/virtcontainers
$ go build -o virtc hack/virtc/main.go
$ sudo -E bash ./utils/virtcontainers-setup.sh
```
`virtcontainers-setup.sh` sets up your environment by performing several tasks. In particular, it creates a __busybox__ bundle, and it creates the CNI configuration files needed to run `virtc` with CNI plugins.
### Get cc-proxy (optional)
If you plan to start `virtc` with the hyperstart agent, you will have to use [cc-proxy](https://github.com/clearcontainers/proxy) as a proxy, meaning you have to perform extra steps to setup your environment.
```
$ go get github.com/clearcontainers/proxy
$ cd $GOPATH/src/github.com/clearcontainers/proxy
$ make
$ sudo make install
```
If you want to see the traces from the proxy when `virtc` will run, you can manually start it with appropriate debug level:
```
$ sudo /usr/libexec/clearcontainers/cc-proxy -v 3
```
This will generate output similar to the following:
```
I0410 08:58:49.058881 5384 proxy.go:521] listening on /var/run/clearcontainers/proxy.sock
I0410 08:58:49.059044 5384 proxy.go:566] proxy started
```
The proxy socket specified in the example log output has to be used as `virtc`'s `--proxy-url` option.
### Get cc-shim (optional)
If you plan to start `virtc` with the hyperstart agent (implying the use of `cc-proxy` as a proxy), you will have to rely on [cc-shim](https://github.com/clearcontainers/shim) in order to interact with the process running inside your container.
First, you will have to perform extra steps to setup your environment.
```
$ go get github.com/clearcontainers/shim
$ cd $GOPATH/src/github.com/clearcontainers/shim && ./autogen.sh
$ make
$ sudo make install
```
The shim will be installed at the following location: `/usr/libexec/clear-containers/cc-shim`. There will be three cases where you will be able to interact with your container's process through `cc-shim`:
_Start a new container_
```
# ./virtc container start --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
_Execute a new process on a running container_
```
# ./virtc container enter --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
_Start a sandbox with container(s) previously created_
```
# ./virtc sandbox start --id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
Notice that in both cases, the `--sandbox-id` and `--id` options have been defined when previously creating a sandbox and a container.
### Run virtc
All following commands __MUST__ be run as root. By default, and unless you decide to modify it and rebuild it, `virtc` starts empty sandboxes (no container started).
#### Run a new sandbox (Create + Start)
```
# ./virtc sandbox run --agent="hyperstart" --network="CNI" --proxy="ccProxy" --proxy-url="unix:///var/run/clearcontainers/proxy.sock" --shim="ccShim" --shim-path="/usr/libexec/cc-shim"
```
#### Create a new sandbox
```
# ./virtc sandbox create --agent="hyperstart" --network="CNI" --proxy="ccProxy" --proxy-url="unix:///var/run/clearcontainers/proxy.sock" --shim="ccShim" --shim-path="/usr/libexec/cc-shim"
```
This will generate output similar to the following:
```
Sandbox 306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 created
```
#### Start an existing sandbox
```
# ./virtc sandbox start --id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Sandbox 306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 started
```
#### Stop an existing sandbox
```
# ./virtc sandbox stop --id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Sandbox 306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 stopped
```
#### Get the status of an existing sandbox and its containers
```
# ./virtc sandbox status --id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following (assuming the sandbox has been started):
```
SB ID STATE HYPERVISOR AGENT
306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 running qemu hyperstart
CONTAINER ID STATE
```
#### Delete an existing sandbox
```
# ./virtc sandbox delete --id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Sandbox 306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 deleted
```
#### List all existing sandboxes
```
# ./virtc sandbox list
```
This will generate output similar to the following:
```
SB ID STATE HYPERVISOR AGENT
306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 running qemu hyperstart
92d73f74-4514-4a0d-81df-db1cc4c59100 running qemu hyperstart
7088148c-049b-4be7-b1be-89b3ae3c551c ready qemu hyperstart
6d57654e-4804-4a91-b72d-b5fe375ed3e1 ready qemu hyperstart
```
#### Create a new container
```
# ./virtc container create --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 --rootfs="/tmp/bundles/busybox/rootfs" --cmd="/bin/ifconfig" --console="/dev/pts/30"
```
This will generate output similar to the following:
```
Container 1 created
```
__Note:__ The option `--console` can be any existing console.
Don't try to provide `$(tty)` as it is your current console, and you would not be
able to get your console back as the shim would be listening to this indefinitely.
Instead, you would prefer to open a new shell and get the `$(tty)` from this shell.
That way, you make sure you have a dedicated input/output terminal.
#### Start an existing container
```
# ./virtc container start --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Container 1 started
```
#### Run a new process on an existing container
```
# ./virtc container enter --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8 --cmd="/bin/ps" --console="/dev/pts/30"
```
This will generate output similar to the following:
```
Container 1 entered
```
__Note:__ The option `--console` can be any existing console.
Don't try to provide `$(tty)` as it is your current console, and you would not be
able to get your console back as the shim would be listening to this indefinitely.
Instead, you would prefer to open a new shell and get the `$(tty)` from this shell.
That way, you make sure you have a dedicated input/output terminal.
#### Stop an existing container
```
# ./virtc container stop --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Container 1 stopped
```
#### Delete an existing container
```
# ./virtc container delete --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following:
```
Container 1 deleted
```
#### Get the status of an existing container
```
# ./virtc container status --id=1 --sandbox-id=306ecdcf-0a6f-4a06-a03e-86a7b868ffc8
```
This will generate output similar to the following (assuming the container has been started):
```
CONTAINER ID STATE
1 running
```

View File

@ -1,905 +0,0 @@
// Copyright (c) 2016 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"context"
"errors"
"fmt"
"os"
"strings"
"text/tabwriter"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
vc "github.com/kata-containers/runtime/virtcontainers"
)
// virtcLog is the application-wide logger.
// NOTE(review): it is never assigned anywhere in this file, so any use
// (e.g. in main()'s Before hook or Fatal path) dereferences a nil
// *logrus.Entry — TODO confirm it is initialised elsewhere, or
// initialise it before use.
var virtcLog *logrus.Entry

// Tab-separated row formats used with text/tabwriter by the `list` and
// `status` sub-commands.
var listFormat = "%s\t%s\t%s\t%s\n"
var statusFormat = "%s\t%s\n"

// ctx is the background context passed to every virtcontainers API call.
var ctx = context.Background()

// Errors returned when a mandatory identifier option is missing.
var (
	errNeedContainerID = errors.New("Container ID cannot be empty")
	errNeedSandboxID   = errors.New("Sandbox ID cannot be empty")
)
// sandboxConfigFlags are the options accepted by the sandbox "create"
// and "run" sub-commands; buildSandboxConfig() turns them into a
// vc.SandboxConfig.
var sandboxConfigFlags = []cli.Flag{
	// Generic flags are parsed by the vc types' own Set() methods.
	cli.GenericFlag{
		Name:  "agent",
		Value: new(vc.AgentType),
		Usage: "the guest agent",
	},
	cli.StringFlag{
		Name:  "id",
		Value: "",
		Usage: "the sandbox identifier (default: auto-generated)",
	},
	cli.StringFlag{
		Name:  "machine-type",
		Value: vc.QemuPC,
		Usage: "hypervisor machine type",
	},
	cli.GenericFlag{
		Name:  "proxy",
		Value: new(vc.ProxyType),
		Usage: "the agent's proxy",
	},
	cli.StringFlag{
		Name:  "proxy-path",
		Value: "",
		Usage: "path to proxy binary",
	},
	cli.GenericFlag{
		Name:  "shim",
		Value: new(vc.ShimType),
		Usage: "the shim type",
	},
	cli.StringFlag{
		Name:  "shim-path",
		Value: "",
		Usage: "the shim binary path",
	},
	// NOTE(review): "cpus" is declared here but never read by
	// buildSandboxConfig() — TODO wire it into the hypervisor
	// configuration or drop the flag.
	cli.UintFlag{
		Name:  "cpus",
		Value: 0,
		Usage: "the number of virtual cpus available for this sandbox",
	},
	cli.UintFlag{
		Name:  "memory",
		Value: 0,
		Usage: "the amount of memory available for this sandbox in MiB",
	},
}
// ccKernelParams are the guest kernel boot parameters for a Clear
// Containers image: systemd is used as init, the clear-containers.target
// unit is booted, and the systemd-networkd service and socket units are
// masked.
var ccKernelParams = []vc.Param{
	{
		Key:   "init",
		Value: "/usr/lib/systemd/systemd",
	},
	{
		Key:   "systemd.unit",
		Value: "clear-containers.target",
	},
	{
		Key:   "systemd.mask",
		Value: "systemd-networkd.service",
	},
	{
		Key:   "systemd.mask",
		Value: "systemd-networkd.socket",
	},
}
// buildKernelParams appends every Clear Containers guest kernel
// parameter to the supplied hypervisor configuration, stopping at the
// first error.
func buildKernelParams(config *vc.HypervisorConfig) error {
	for _, param := range ccKernelParams {
		err := config.AddKernelParam(param)
		if err != nil {
			return err
		}
	}

	return nil
}
// buildSandboxConfig assembles a virtcontainers SandboxConfig from the
// command-line flags carried by context.
//
// It resolves the agent, proxy and shim types (cli.Generic flags),
// selects the kernel image matching the requested machine type, and
// auto-generates a sandbox ID when --id is not supplied.
func buildSandboxConfig(context *cli.Context) (vc.SandboxConfig, error) {
	var agConfig interface{}

	proxyPath := context.String("proxy-path")
	shimPath := context.String("shim-path")
	machineType := context.String("machine-type")
	// Fix: this previously read the non-existent "vm-memory" flag, so
	// the declared "memory" flag was silently ignored and MemorySize
	// was always 0.
	vmMemory := context.Uint("memory")
	// NOTE(review): the "cpus" flag is declared in sandboxConfigFlags
	// but never consumed here — TODO wire it into the hypervisor config
	// or drop the flag.

	agentType, ok := context.Generic("agent").(*vc.AgentType)
	if !ok {
		return vc.SandboxConfig{}, fmt.Errorf("Could not convert agent type")
	}

	proxyType, ok := context.Generic("proxy").(*vc.ProxyType)
	if !ok {
		return vc.SandboxConfig{}, fmt.Errorf("Could not convert proxy type")
	}

	shimType, ok := context.Generic("shim").(*vc.ShimType)
	if !ok {
		return vc.SandboxConfig{}, fmt.Errorf("Could not convert shim type")
	}

	// pc-lite machines boot the uncompressed kernel image.
	kernelPath := "/usr/share/clear-containers/vmlinuz.container"
	if machineType == vc.QemuPCLite {
		kernelPath = "/usr/share/clear-containers/vmlinux.container"
	}

	hypervisorConfig := vc.HypervisorConfig{
		KernelPath:            kernelPath,
		ImagePath:             "/usr/share/clear-containers/clear-containers.img",
		HypervisorMachineType: machineType,
		MemorySize:            uint32(vmMemory),
	}

	if err := buildKernelParams(&hypervisorConfig); err != nil {
		return vc.SandboxConfig{}, err
	}

	netConfig := vc.NetworkConfig{}

	// The agent takes no configuration of its own.
	agConfig = nil

	proxyConfig := getProxyConfig(*proxyType, proxyPath)
	shimConfig := getShimConfig(*shimType, shimPath)

	id := context.String("id")
	if id == "" {
		// auto-generate sandbox name
		id = uuid.Generate().String()
	}

	sandboxConfig := vc.SandboxConfig{
		ID:               id,
		HypervisorType:   vc.QemuHypervisor,
		HypervisorConfig: hypervisorConfig,
		AgentType:        *agentType,
		AgentConfig:      agConfig,
		NetworkConfig:    netConfig,
		ProxyType:        *proxyType,
		ProxyConfig:      proxyConfig,
		ShimType:         *shimType,
		ShimConfig:       shimConfig,
		Containers:       []vc.ContainerConfig{},
	}

	return sandboxConfig, nil
}
// getProxyConfig returns the proxy configuration matching proxyType.
// Only the Kata and CC proxies carry a binary path; any other proxy
// type yields a zero-value ProxyConfig.
func getProxyConfig(proxyType vc.ProxyType, path string) vc.ProxyConfig {
	switch proxyType {
	case vc.KataProxyType, vc.CCProxyType:
		return vc.ProxyConfig{Path: path}
	default:
		return vc.ProxyConfig{}
	}
}
// getShimConfig returns the shim configuration matching shimType, or
// nil when the shim type does not take one.
func getShimConfig(shimType vc.ShimType, path string) interface{} {
	if shimType == vc.CCShimType || shimType == vc.KataShimType {
		return vc.ShimConfig{Path: path}
	}

	return nil
}
// checkRequiredSandboxArgs checks to ensure the required command-line
// arguments have been specified for the sandbox sub-command specified by
// the context argument.
func checkRequiredSandboxArgs(context *cli.Context) error {
	if context == nil {
		return fmt.Errorf("BUG: need Context")
	}

	// Dispatch on the sub-sub-command name.
	switch context.Command.Name {
	case "create", "list", "run":
		// these commands don't require any arguments
		return nil
	}

	// Every remaining sub-command needs a sandbox identifier.
	if context.String("id") == "" {
		return errNeedSandboxID
	}

	return nil
}
// checkRequiredContainerArgs checks to ensure the required command-line
// arguments have been specified for the container sub-command specified
// by the context argument.
func checkRequiredContainerArgs(context *cli.Context) error {
	if context == nil {
		return fmt.Errorf("BUG: need Context")
	}

	// sub-sub-command name
	name := context.Command.Name

	if context.String("sandbox-id") == "" {
		return errNeedSandboxID
	}

	// Only "create" additionally requires a rootfs.
	if name == "create" && context.String("rootfs") == "" {
		return fmt.Errorf("%s: need rootfs", name)
	}

	if context.String("id") == "" {
		return errNeedContainerID
	}

	return nil
}
// runSandbox builds a sandbox configuration from the command-line
// options and creates and starts the sandbox in one step.
func runSandbox(context *cli.Context) error {
	config, err := buildSandboxConfig(context)
	if err != nil {
		return fmt.Errorf("Could not build sandbox config: %s", err)
	}

	if _, err = vc.RunSandbox(ctx, config, nil); err != nil {
		return fmt.Errorf("Could not run sandbox: %s", err)
	}

	return nil
}
// createSandbox builds a sandbox configuration from the command-line
// options, creates the sandbox (without starting it) and prints its ID.
func createSandbox(context *cli.Context) error {
	config, err := buildSandboxConfig(context)
	if err != nil {
		return fmt.Errorf("Could not build sandbox config: %s", err)
	}

	sandbox, err := vc.CreateSandbox(ctx, config, nil)
	if err != nil {
		return fmt.Errorf("Could not create sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s created\n", sandbox.ID())

	return nil
}
// checkSandboxArgs validates the sandbox-related arguments in context
// and, on success, invokes f with the same context.
func checkSandboxArgs(context *cli.Context, f func(context *cli.Context) error) error {
	err := checkRequiredSandboxArgs(context)
	if err != nil {
		return err
	}

	return f(context)
}
// checkContainerArgs validates the container-related arguments in
// context and, on success, invokes f with the same context.
func checkContainerArgs(context *cli.Context, f func(context *cli.Context) error) error {
	err := checkRequiredContainerArgs(context)
	if err != nil {
		return err
	}

	return f(context)
}
// deleteSandbox deletes the sandbox identified by the --id option.
func deleteSandbox(context *cli.Context) error {
	p, err := vc.DeleteSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not delete sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s deleted\n", p.ID())

	return nil
}

// startSandbox starts the previously created sandbox identified by the
// --id option.
func startSandbox(context *cli.Context) error {
	p, err := vc.StartSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not start sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s started\n", p.ID())

	return nil
}

// stopSandbox stops the running sandbox identified by the --id option.
func stopSandbox(context *cli.Context) error {
	p, err := vc.StopSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not stop sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s stopped\n", p.ID())

	return nil
}

// pauseSandbox pauses the sandbox identified by the --id option.
func pauseSandbox(context *cli.Context) error {
	p, err := vc.PauseSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not pause sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s paused\n", p.ID())

	return nil
}

// resumeSandbox resumes the previously paused sandbox identified by the
// --id option.
func resumeSandbox(context *cli.Context) error {
	p, err := vc.ResumeSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not resume sandbox: %s", err)
	}

	fmt.Printf("Sandbox %s resumed\n", p.ID())

	return nil
}
// listSandboxes prints a tab-formatted table of every known sandbox:
// its ID, state, hypervisor and agent.
func listSandboxes(context *cli.Context) error {
	statusList, err := vc.ListSandbox(ctx)
	if err != nil {
		return fmt.Errorf("Could not list sandbox: %s", err)
	}

	w := tabwriter.NewWriter(os.Stdout, 2, 8, 1, '\t', 0)
	fmt.Fprintf(w, listFormat, "SB ID", "STATE", "HYPERVISOR", "AGENT")

	for _, status := range statusList {
		fmt.Fprintf(w, listFormat,
			status.ID, status.State.State, status.Hypervisor, status.Agent)
	}

	w.Flush()

	return nil
}
// statusSandbox prints a tab-formatted status report for the sandbox
// identified by the --id option, followed by a table with the status of
// each of its containers.
func statusSandbox(context *cli.Context) error {
	sandboxStatus, err := vc.StatusSandbox(ctx, context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not get sandbox status: %s", err)
	}

	w := tabwriter.NewWriter(os.Stdout, 2, 8, 1, '\t', 0)
	fmt.Fprintf(w, listFormat, "SB ID", "STATE", "HYPERVISOR", "AGENT")
	// The appended "\n" leaves a blank line between the sandbox row and
	// the container table below.
	fmt.Fprintf(w, listFormat+"\n",
		sandboxStatus.ID, sandboxStatus.State.State, sandboxStatus.Hypervisor, sandboxStatus.Agent)

	fmt.Fprintf(w, statusFormat, "CONTAINER ID", "STATE")
	for _, contStatus := range sandboxStatus.ContainersStatus {
		fmt.Fprintf(w, statusFormat, contStatus.ID, contStatus.State.State)
	}
	w.Flush()

	return nil
}
// runSandboxCommand implements `virtc sandbox run` (create + start).
var runSandboxCommand = cli.Command{
	Name:   "run",
	Usage:  "run a sandbox",
	Flags:  sandboxConfigFlags,
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, runSandbox)
	},
}

// createSandboxCommand implements `virtc sandbox create`.
var createSandboxCommand = cli.Command{
	Name:   "create",
	Usage:  "create a sandbox",
	Flags:  sandboxConfigFlags,
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, createSandbox)
	},
}

// deleteSandboxCommand implements `virtc sandbox delete`.
var deleteSandboxCommand = cli.Command{
	Name:  "delete",
	Usage: "delete an existing sandbox",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, deleteSandbox)
	},
}

// startSandboxCommand implements `virtc sandbox start`.
var startSandboxCommand = cli.Command{
	Name:  "start",
	Usage: "start an existing sandbox",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, startSandbox)
	},
}

// stopSandboxCommand implements `virtc sandbox stop`.
var stopSandboxCommand = cli.Command{
	Name:  "stop",
	Usage: "stop an existing sandbox",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, stopSandbox)
	},
}

// listSandboxesCommand implements `virtc sandbox list` (takes no flags).
var listSandboxesCommand = cli.Command{
	Name:  "list",
	Usage: "list all existing sandboxes",
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, listSandboxes)
	},
}

// statusSandboxCommand implements `virtc sandbox status`.
var statusSandboxCommand = cli.Command{
	Name:  "status",
	Usage: "returns a detailed sandbox status",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, statusSandbox)
	},
}

// pauseSandboxCommand implements `virtc sandbox pause`.
var pauseSandboxCommand = cli.Command{
	Name:  "pause",
	Usage: "pause an existing sandbox",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, pauseSandbox)
	},
}

// resumeSandboxCommand implements `virtc sandbox resume`.
var resumeSandboxCommand = cli.Command{
	Name:  "resume",
	Usage: "unpause a paused sandbox",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkSandboxArgs(context, resumeSandbox)
	},
}
// createContainer creates a container inside the sandbox named by
// --sandbox-id, using the --id, --rootfs, --cmd and --console options.
// The container ID is auto-generated when --id is empty.
func createContainer(context *cli.Context) error {
	console := context.String("console")

	cmd := types.Cmd{
		Args: strings.Split(context.String("cmd"), " "),
		Envs: []types.EnvVar{
			{
				Var:   "PATH",
				Value: "/bin:/usr/bin:/sbin:/usr/sbin",
			},
		},
		WorkDir: "/",
		// Supplying a console implies an interactive session.
		Interactive: console != "",
		Console:     console,
	}

	id := context.String("id")
	if id == "" {
		// auto-generate container name
		id = uuid.Generate().String()
	}

	containerConfig := vc.ContainerConfig{
		ID:     id,
		RootFs: vc.RootFs{Target: context.String("rootfs"), Mounted: true},
		Cmd:    cmd,
	}

	_, c, err := vc.CreateContainer(ctx, context.String("sandbox-id"), containerConfig)
	if err != nil {
		return fmt.Errorf("Could not create container: %s", err)
	}

	fmt.Printf("Container %s created\n", c.ID())

	return nil
}
// deleteContainer deletes the container identified by the --sandbox-id
// and --id options.
func deleteContainer(context *cli.Context) error {
	c, err := vc.DeleteContainer(ctx, context.String("sandbox-id"), context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not delete container: %s", err)
	}

	fmt.Printf("Container %s deleted\n", c.ID())

	return nil
}

// startContainer starts the previously created container identified by
// the --sandbox-id and --id options.
func startContainer(context *cli.Context) error {
	c, err := vc.StartContainer(ctx, context.String("sandbox-id"), context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not start container: %s", err)
	}

	fmt.Printf("Container %s started\n", c.ID())

	return nil
}

// stopContainer stops the running container identified by the
// --sandbox-id and --id options.
func stopContainer(context *cli.Context) error {
	c, err := vc.StopContainer(ctx, context.String("sandbox-id"), context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not stop container: %s", err)
	}

	fmt.Printf("Container %s stopped\n", c.ID())

	return nil
}
// enterContainer runs an extra process inside the running container
// identified by --sandbox-id and --id. The command line comes from
// --cmd; when --console names a terminal, the process is interactive.
func enterContainer(context *cli.Context) error {
	console := context.String("console")

	cmd := types.Cmd{
		Args: strings.Split(context.String("cmd"), " "),
		Envs: []types.EnvVar{
			{
				Var:   "PATH",
				Value: "/bin:/usr/bin:/sbin:/usr/sbin",
			},
		},
		WorkDir: "/",
		// Supplying a console implies an interactive session.
		Interactive: console != "",
		Console:     console,
	}

	_, c, _, err := vc.EnterContainer(ctx, context.String("sandbox-id"), context.String("id"), cmd)
	if err != nil {
		return fmt.Errorf("Could not enter container: %s", err)
	}

	fmt.Printf("Container %s entered\n", c.ID())

	return nil
}
// statusContainer prints a tab-formatted status row for the container
// identified by the --sandbox-id and --id options.
func statusContainer(context *cli.Context) error {
	status, err := vc.StatusContainer(ctx, context.String("sandbox-id"), context.String("id"))
	if err != nil {
		return fmt.Errorf("Could not get container status: %s", err)
	}

	w := tabwriter.NewWriter(os.Stdout, 2, 8, 1, '\t', 0)
	fmt.Fprintf(w, statusFormat, "CONTAINER ID", "STATE")
	fmt.Fprintf(w, statusFormat, status.ID, status.State.State)
	w.Flush()

	return nil
}
// createContainerCommand implements `virtc container create`.
var createContainerCommand = cli.Command{
	Name:  "create",
	Usage: "create a container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier (default: auto-generated)",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
		cli.StringFlag{
			Name:  "rootfs",
			Value: "",
			Usage: "the container rootfs directory",
		},
		cli.StringFlag{
			Name:  "cmd",
			Value: "",
			Usage: "the command executed inside the container",
		},
		cli.StringFlag{
			Name:  "console",
			Value: "",
			Usage: "the container console",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, createContainer)
	},
}

// deleteContainerCommand implements `virtc container delete`.
var deleteContainerCommand = cli.Command{
	Name:  "delete",
	Usage: "delete an existing container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, deleteContainer)
	},
}

// startContainerCommand implements `virtc container start`.
var startContainerCommand = cli.Command{
	Name:  "start",
	Usage: "start an existing container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, startContainer)
	},
}

// stopContainerCommand implements `virtc container stop`.
var stopContainerCommand = cli.Command{
	Name:  "stop",
	Usage: "stop an existing container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, stopContainer)
	},
}

// enterContainerCommand implements `virtc container enter`. Note the
// non-empty default for --cmd ("echo").
var enterContainerCommand = cli.Command{
	Name:  "enter",
	Usage: "enter an existing container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
		cli.StringFlag{
			Name:  "cmd",
			Value: "echo",
			Usage: "the command executed inside the container",
		},
		cli.StringFlag{
			Name:  "console",
			Value: "",
			Usage: "the process console",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, enterContainer)
	},
}

// statusContainerCommand implements `virtc container status`.
var statusContainerCommand = cli.Command{
	Name:  "status",
	Usage: "returns detailed container status",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "the container identifier",
		},
		cli.StringFlag{
			Name:  "sandbox-id",
			Value: "",
			Usage: "the sandbox identifier",
		},
	},
	Action: func(context *cli.Context) error {
		return checkContainerArgs(context, statusContainer)
	},
}
func main() {
cli.VersionFlag = cli.BoolFlag{
Name: "version",
Usage: "print the version",
}
virtc := cli.NewApp()
virtc.Name = "VirtContainers CLI"
virtc.Version = "0.0.1"
virtc.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output for logging",
},
cli.StringFlag{
Name: "log",
Value: "",
Usage: "set the log file path where internal debug information is written",
},
cli.StringFlag{
Name: "log-format",
Value: "text",
Usage: "set the format used by logs ('text' (default), or 'json')",
},
}
virtc.Commands = []cli.Command{
{
Name: "sandbox",
Usage: "sandbox commands",
Subcommands: []cli.Command{
createSandboxCommand,
deleteSandboxCommand,
listSandboxesCommand,
pauseSandboxCommand,
resumeSandboxCommand,
runSandboxCommand,
startSandboxCommand,
stopSandboxCommand,
statusSandboxCommand,
},
},
{
Name: "container",
Usage: "container commands",
Subcommands: []cli.Command{
createContainerCommand,
deleteContainerCommand,
startContainerCommand,
stopContainerCommand,
enterContainerCommand,
statusContainerCommand,
},
},
}
virtc.Before = func(context *cli.Context) error {
if context.GlobalBool("debug") {
virtcLog.Level = logrus.DebugLevel
}
if path := context.GlobalString("log"); path != "" {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0640)
if err != nil {
return err
}
virtcLog.Logger.Out = f
}
switch context.GlobalString("log-format") {
case "text":
// retain logrus's default.
case "json":
virtcLog.Logger.Formatter = new(logrus.JSONFormatter)
default:
return fmt.Errorf("unknown log-format %q", context.GlobalString("log-format"))
}
// Set virtcontainers logger.
vc.SetLogger(ctx, virtcLog)
return nil
}
err := virtc.Run(os.Args)
if err != nil {
virtcLog.Fatal(err)
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}