Merge pull request #3687 from luodw/nydus-clh

nydus: add lazyload support for kata with clh
commit 031da99914
Peng Tao 2022-02-21 19:31:45 +08:00, committed by GitHub
10 changed files with 108 additions and 63 deletions


@@ -2,7 +2,7 @@
 ## Introduction
-Refer to [kata-`nydus`-design](../design/kata-nydus-design.md)
+Refer to [kata-`nydus`-design](../design/kata-nydus-design.md) for an introduction. `nydus` currently supports Kata Containers with the `QEMU` and `CLH` hypervisors.
 ## How to
@@ -16,7 +16,7 @@ You can use Kata Containers with `nydus` as follows,
 4. Use [kata-containers](https://github.com/kata-containers/kata-containers) `latest` branch to compile and build `kata-containers.img`;
-5. Update `configuration-qemu.toml` to include:
+5. Update `configuration-qemu.toml` or `configuration-clh.toml` to include:
 ```toml
 shared_fs = "virtio-fs-nydus"
@@ -24,7 +24,7 @@ virtio_fs_daemon = "<nydusd binary path>"
 virtio_fs_extra_args = []
 ```
-6. run `crictl run -r kata-qemu nydus-container.yaml nydus-sandbox.yaml`;
+6. run `crictl run -r kata nydus-container.yaml nydus-sandbox.yaml`;
 The `nydus-sandbox.yaml` looks like the following:
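The point of this pairing is lazy loading: with `shared_fs = "virtio-fs-nydus"` the runtime launches `nydusd` in place of `virtiofsd`, and the container image's RAFS-formatted lowerdir is mounted into the guest on demand rather than being pulled in full up front (see the `rafsMountPath` handling in the runtime changes below).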


@@ -163,6 +163,7 @@ DEFENTROPYSOURCE := /dev/urandom
 DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"]
 DEFDISABLEBLOCK := false
+DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs
 DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs
 DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/kata-qemu/virtiofsd
 DEFVALIDVIRTIOFSDAEMONPATHS := [\"$(DEFVIRTIOFSDAEMON)\"]
@@ -437,6 +438,7 @@ USER_VARS += DEFDISABLEBLOCK
 USER_VARS += DEFBLOCKSTORAGEDRIVER_ACRN
 USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
 USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
+USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS
 USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
 USER_VARS += DEFVIRTIOFSDAEMON
 USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS
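These Makefile defaults are template inputs: adding `DEFSHAREDFS_CLH_VIRTIOFS` to `USER_VARS` is what makes the `@DEFSHAREDFS_CLH_VIRTIOFS@` placeholder in the CLH configuration template below resolve to `virtio-fs`.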


@@ -70,6 +70,11 @@ default_memory = @DEFMEMSZ@
 # This determines how many times memory can be hot-added to the sandbox/VM.
 #memory_slots = @DEFMEMSLOTS@
+# Shared file system type:
+#   - virtio-fs (default)
+#   - virtio-fs-nydus
+shared_fs = "@DEFSHAREDFS_CLH_VIRTIOFS@"
 # Path to vhost-user-fs daemon.
 virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"


@@ -426,7 +426,7 @@ func (h hypervisor) sharedFS() (string, error) {
 	supportedSharedFS := []string{config.Virtio9P, config.VirtioFS, config.VirtioFSNydus}
 	if h.SharedFS == "" {
-		return config.Virtio9P, nil
+		return config.VirtioFS, nil
 	}
 	for _, fs := range supportedSharedFS {
@@ -644,14 +644,9 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		return vc.HypervisorConfig{}, err
 	}
-	if sharedFS == config.VirtioFS && h.VirtioFSDaemon == "" {
+	if (sharedFS == config.VirtioFS || sharedFS == config.VirtioFSNydus) && h.VirtioFSDaemon == "" {
 		return vc.HypervisorConfig{},
-			errors.New("cannot enable virtio-fs without daemon path in configuration file")
-	}
-	if sharedFS == config.VirtioFSNydus && h.VirtioFSDaemon == "" {
-		return vc.HypervisorConfig{},
-			errors.New("cannot enable virtio nydus without nydusd daemon path in configuration file")
+			fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
 	}
 	if vSock, err := utils.SupportsVsocks(); !vSock {
@@ -822,11 +817,18 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		return vc.HypervisorConfig{}, err
 	}
-	sharedFS := config.VirtioFS
+	sharedFS, err := h.sharedFS()
+	if err != nil {
+		return vc.HypervisorConfig{}, err
+	}
+	if sharedFS != config.VirtioFS && sharedFS != config.VirtioFSNydus {
+		return vc.HypervisorConfig{}, errors.New("clh only supports virtio-fs or virtio-fs-nydus")
+	}
 	if h.VirtioFSDaemon == "" {
 		return vc.HypervisorConfig{},
-			errors.New("virtio-fs daemon path is missing in configuration file")
+			fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
 	}
 	return vc.HypervisorConfig{
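Two behavior changes in this file are worth calling out: an empty `shared_fs` now defaults to `virtio-fs` rather than `virtio-9p`, and the QEMU and CLH paths now share one validation rule for the daemon path. A minimal, self-contained sketch of that rule (the constant names are local stand-ins for `config.VirtioFS` and `config.VirtioFSNydus`; the string values are taken from this diff):

```go
package main

import "fmt"

// Stand-ins for the runtime's config constants; values as they appear in
// this diff's TOML and Makefile changes.
const (
	virtioFS      = "virtio-fs"
	virtioFSNydus = "virtio-fs-nydus"
)

// validateSharedFS mirrors the consolidated post-patch check: every
// virtio-fs flavour requires virtio_fs_daemon to be set in the TOML.
func validateSharedFS(sharedFS, daemonPath string) error {
	if (sharedFS == virtioFS || sharedFS == virtioFSNydus) && daemonPath == "" {
		return fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
	}
	return nil
}

func main() {
	fmt.Println(validateSharedFS(virtioFSNydus, ""))                            // rejected: no daemon path
	fmt.Println(validateSharedFS(virtioFS, "/usr/libexec/kata-qemu/virtiofsd")) // <nil>
}
```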


@@ -633,6 +633,8 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
 		PCIeRootPort:         pcieRootPort,
 		RxRateLimiterMaxRate: rxRateLimiterMaxRate,
 		TxRateLimiterMaxRate: txRateLimiterMaxRate,
+		SharedFS:             "virtio-fs",
+		VirtioFSDaemon:       filepath.Join(dir, "virtiofsd"),
 	}
 	files := []string{hypervisorPath, kernelPath, imagePath}
@@ -1388,6 +1390,8 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
 			Image:          "/",
 			Firmware:       "/",
 			FirmwareVolume: "/",
+			SharedFS:       "virtio-fs",
+			VirtioFSDaemon: "/usr/libexec/kata-qemu/virtiofsd",
 		},
 	},
 }


@@ -144,27 +144,27 @@ func (c *clhClientApi) VmRemoveDevicePut(ctx context.Context, vmRemoveDevice chc
 // Cloud hypervisor state
 //
 type CloudHypervisorState struct {
-	apiSocket    string
-	PID          int
-	VirtiofsdPID int
-	state        clhState
+	apiSocket         string
+	PID               int
+	VirtiofsDaemonPid int
+	state             clhState
 }
 func (s *CloudHypervisorState) reset() {
 	s.PID = 0
-	s.VirtiofsdPID = 0
+	s.VirtiofsDaemonPid = 0
 	s.state = clhNotReady
 }
 type cloudHypervisor struct {
-	console   console.Console
-	virtiofsd VirtiofsDaemon
-	APIClient clhClient
-	ctx       context.Context
-	id        string
-	vmconfig  chclient.VmConfig
-	state     CloudHypervisorState
-	config    HypervisorConfig
+	console        console.Console
+	virtiofsDaemon VirtiofsDaemon
+	APIClient      clhClient
+	ctx            context.Context
+	id             string
+	vmconfig       chclient.VmConfig
+	state          CloudHypervisorState
+	config         HypervisorConfig
 }
 var clhKernelParams = []Param{
@@ -198,6 +198,10 @@ func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
 	return nil
 }
+func (clh *cloudHypervisor) nydusdAPISocketPath(id string) (string, error) {
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, nydusdAPISock)
+}
 // For cloudHypervisor this call only sets the internal structure up.
 // The VM will be created and started through StartVM().
 func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Network, hypervisorConfig *HypervisorConfig) error {
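`utils.BuildSocketPath` composes a socket path from its elements, so the nydusd API socket should land in the per-sandbox VM store directory, alongside the sandbox's other control sockets.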
@@ -223,8 +227,8 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 	if clh.state.PID > 0 {
 		clh.Logger().WithField("function", "CreateVM").Info("Sandbox already exist, loading from state")
-		clh.virtiofsd = &virtiofsd{
-			PID:        clh.state.VirtiofsdPID,
+		clh.virtiofsDaemon = &virtiofsd{
+			PID:        clh.state.VirtiofsDaemonPid,
 			sourcePath: hypervisorConfig.SharedPath,
 			debug:      clh.config.Debug,
 			socketPath: virtiofsdSocketPath,
@@ -349,7 +353,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 		ApiInternal: chclient.NewAPIClient(cfg).DefaultApi,
 	}
-	clh.virtiofsd = &virtiofsd{
+	clh.virtiofsDaemon = &virtiofsd{
 		path:       clh.config.VirtioFSDaemon,
 		sourcePath: filepath.Join(GetSharePath(clh.id)),
 		socketPath: virtiofsdSocketPath,
@@ -358,6 +362,25 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 		cache:      clh.config.VirtioFSCache,
 	}
+	if clh.config.SharedFS == config.VirtioFSNydus {
+		apiSockPath, err := clh.nydusdAPISocketPath(clh.id)
+		if err != nil {
+			clh.Logger().WithError(err).Error("Invalid api socket path for nydusd")
+			return err
+		}
+		nd := &nydusd{
+			path:        clh.config.VirtioFSDaemon,
+			sockPath:    virtiofsdSocketPath,
+			apiSockPath: apiSockPath,
+			sourcePath:  filepath.Join(GetSharePath(clh.id)),
+			debug:       clh.config.Debug,
+			extraArgs:   clh.config.VirtioFSExtraArgs,
+			startFn:     startInShimNS,
+		}
+		nd.setupShareDirFn = nd.setupPassthroughFS
+		clh.virtiofsDaemon = nd
+	}
 	if clh.config.SGXEPCSize > 0 {
 		epcSection := chclient.NewSgxEpcConfig("kata-epc", clh.config.SGXEPCSize)
 		epcSection.Prefault = func(b bool) *bool { return &b }(true)
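The rename from `virtiofsd` to `virtiofsDaemon` works because the `virtiofsd` and `nydusd` types both satisfy the `VirtiofsDaemon` interface. The diff itself never shows that interface, so the following sketch infers its shape from the calls visible here and in the agent and mount changes below (`Start`, `Stop`, `Mount`, `Umount`); the exact signatures and the `MountOption` fields are assumptions:

```go
package sketch

import "context"

// onQuitFunc is the callback StartVM passes so the daemon can stop the VM
// if it exits unexpectedly (clh.StopVM in this diff).
type onQuitFunc func()

// MountOption is a stand-in for the mount request built by
// buildContainerRootfsWithNydus; its fields here are illustrative only.
type MountOption struct {
	Source     string
	Mountpoint string
}

// VirtiofsDaemon is the contract both shared-FS daemons appear to satisfy,
// inferred from the calls made in this diff.
type VirtiofsDaemon interface {
	Start(ctx context.Context, onQuit onQuitFunc) (pid int, err error)
	Stop(ctx context.Context) error
	Mount(opt MountOption) error
	Umount(mountpoint string) error
}
```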
@@ -389,8 +412,8 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
 		return err
 	}
-	if clh.virtiofsd == nil {
-		return errors.New("Missing virtiofsd configuration")
+	if clh.virtiofsDaemon == nil {
+		return errors.New("Missing virtiofsDaemon configuration")
 	}
 	// This needs to be done as late as possible, just before launching
@@ -402,23 +425,23 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
 	}
 	defer label.SetProcessLabel("")
-	if clh.config.SharedFS == config.VirtioFS {
-		clh.Logger().WithField("function", "StartVM").Info("Starting virtiofsd")
-		pid, err := clh.virtiofsd.Start(ctx, func() {
+	if clh.config.SharedFS == config.VirtioFS || clh.config.SharedFS == config.VirtioFSNydus {
+		clh.Logger().WithField("function", "StartVM").Info("Starting virtiofsDaemon")
+		pid, err := clh.virtiofsDaemon.Start(ctx, func() {
 			clh.StopVM(ctx, false)
 		})
 		if err != nil {
 			return err
 		}
-		clh.state.VirtiofsdPID = pid
+		clh.state.VirtiofsDaemonPid = pid
 	} else {
 		return errors.New("cloud-hypervisor only supports virtio based file sharing")
 	}
 	pid, err := clh.launchClh()
 	if err != nil {
-		if shutdownErr := clh.virtiofsd.Stop(ctx); shutdownErr != nil {
-			clh.Logger().WithError(shutdownErr).Warn("error shutting down Virtiofsd")
+		if shutdownErr := clh.virtiofsDaemon.Stop(ctx); shutdownErr != nil {
+			clh.Logger().WithError(shutdownErr).Warn("error shutting down VirtiofsDaemon")
 		}
 		return fmt.Errorf("failed to launch cloud-hypervisor: %q", err)
 	}
@@ -759,14 +782,14 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 func (clh *cloudHypervisor) Save() (s hv.HypervisorState) {
 	s.Pid = clh.state.PID
 	s.Type = string(ClhHypervisor)
-	s.VirtiofsDaemonPid = clh.state.VirtiofsdPID
+	s.VirtiofsDaemonPid = clh.state.VirtiofsDaemonPid
 	s.APISocket = clh.state.apiSocket
 	return
 }
 func (clh *cloudHypervisor) Load(s hv.HypervisorState) {
 	clh.state.PID = s.Pid
-	clh.state.VirtiofsdPID = s.VirtiofsDaemonPid
+	clh.state.VirtiofsDaemonPid = s.VirtiofsDaemonPid
 	clh.state.apiSocket = s.APISocket
 }
@@ -790,7 +813,7 @@ func (clh *cloudHypervisor) GetPids() []int {
 }
 func (clh *cloudHypervisor) GetVirtioFsPid() *int {
-	return &clh.state.VirtiofsdPID
+	return &clh.state.VirtiofsDaemonPid
 }
 func (clh *cloudHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
@@ -872,13 +895,13 @@ func (clh *cloudHypervisor) terminate(ctx context.Context, waitOnly bool) (err e
 		return err
 	}
-	if clh.virtiofsd == nil {
-		return errors.New("virtiofsd config is nil, failed to stop it")
+	if clh.virtiofsDaemon == nil {
+		return errors.New("virtiofsDaemon config is nil, failed to stop it")
 	}
-	clh.Logger().Debug("stop virtiofsd")
-	if err = clh.virtiofsd.Stop(ctx); err != nil {
-		clh.Logger().WithError(err).Error("failed to stop virtiofsd")
+	clh.Logger().Debug("stop virtiofsDaemon")
+	if err = clh.virtiofsDaemon.Stop(ctx); err != nil {
+		clh.Logger().WithError(err).Error("failed to stop virtiofsDaemon")
 	}
 	return
@@ -1181,7 +1204,7 @@ func (clh *cloudHypervisor) addNet(e Endpoint) error {
 // Add shared Volume using virtiofs
 func (clh *cloudHypervisor) addVolume(volume types.Volume) error {
-	if clh.config.SharedFS != config.VirtioFS {
+	if clh.config.SharedFS != config.VirtioFS && clh.config.SharedFS != config.VirtioFSNydus {
 		return fmt.Errorf("shared fs method not supported %s", clh.config.SharedFS)
 	}


@@ -296,7 +296,7 @@ func TestClhCreateVM(t *testing.T) {
 	assert.Exactly(clhConfig, clh.config)
 }
-func TestClooudHypervisorStartSandbox(t *testing.T) {
+func TestCloudHypervisorStartSandbox(t *testing.T) {
 	assert := assert.New(t)
 	clhConfig, err := newClhConfig()
 	assert.NoError(err)
@@ -308,9 +308,9 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
 	clhConfig.RunStorePath = store.RunStoragePath()
 	clh := &cloudHypervisor{
-		config:    clhConfig,
-		APIClient: &clhClientMock{},
-		virtiofsd: &virtiofsdMock{},
+		config:         clhConfig,
+		APIClient:      &clhClientMock{},
+		virtiofsDaemon: &virtiofsdMock{},
 	}
 	err = clh.StartVM(context.Background(), 10)
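For tests, any type with those four methods will do; a minimal stub in the same spirit as the `virtiofsdMock` used here, reusing the sketch types from the earlier example (this is not the project's actual mock):

```go
// virtiofsDaemonStub satisfies the sketched VirtiofsDaemon interface with
// canned values, enough to drive a StartVM-style unit test.
type virtiofsDaemonStub struct{}

func (virtiofsDaemonStub) Start(_ context.Context, _ onQuitFunc) (int, error) { return 1234, nil }
func (virtiofsDaemonStub) Stop(_ context.Context) error                       { return nil }
func (virtiofsDaemonStub) Mount(_ MountOption) error                          { return nil }
func (virtiofsDaemonStub) Umount(_ string) error                              { return nil }

// Compile-time assertion that the stub implements the interface.
var _ VirtiofsDaemon = virtiofsDaemonStub{}
```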


@@ -1284,13 +1284,24 @@ func (k *kataAgent) rollbackFailingContainerCreation(ctx context.Context, c *Con
 	}
 }
-func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
-	if sandbox.GetHypervisorType() != string(QemuHypervisor) {
-		// qemu is supported first, other hypervisors will next
-		// https://github.com/kata-containers/kata-containers/issues/2724
+func getVirtiofsDaemonForNydus(sandbox *Sandbox) (VirtiofsDaemon, error) {
+	var virtiofsDaemon VirtiofsDaemon
+	switch sandbox.GetHypervisorType() {
+	case string(QemuHypervisor):
+		virtiofsDaemon = sandbox.hypervisor.(*qemu).virtiofsDaemon
+	case string(ClhHypervisor):
+		virtiofsDaemon = sandbox.hypervisor.(*cloudHypervisor).virtiofsDaemon
+	default:
 		return nil, errNydusdNotSupport
 	}
-	q, _ := sandbox.hypervisor.(*qemu)
+	return virtiofsDaemon, nil
+}
+func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
+	virtiofsDaemon, err := getVirtiofsDaemonForNydus(sandbox)
+	if err != nil {
+		return nil, err
+	}
 	extraOption, err := parseExtraOption(c.rootFs.Options)
 	if err != nil {
 		return nil, err
@@ -1302,7 +1313,7 @@ func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container
 	}
 	k.Logger().Infof("nydus option: %v", extraOption)
 	// mount lowerdir to guest /run/kata-containers/shared/images/<cid>/lowerdir
-	if err := q.virtiofsDaemon.Mount(*mountOpt); err != nil {
+	if err := virtiofsDaemon.Mount(*mountOpt); err != nil {
 		return nil, err
 	}
 	rootfs := &grpc.Storage{}
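Both `buildContainerRootfsWithNydus` here and `nydusContainerCleanup` in the next file now obtain the daemon through `getVirtiofsDaemonForNydus`, so extending nydus support to a further hypervisor comes down to adding one more `case` to that switch.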


@@ -390,13 +390,11 @@ func bindUnmountContainerSnapshotDir(ctx context.Context, sharedDir, cID string)
 func nydusContainerCleanup(ctx context.Context, sharedDir string, c *Container) error {
 	sandbox := c.sandbox
-	if sandbox.GetHypervisorType() != string(QemuHypervisor) {
-		// qemu is supported first, other hypervisors will next
-		// https://github.com/kata-containers/kata-containers/issues/2724
-		return errNydusdNotSupport
+	virtiofsDaemon, err := getVirtiofsDaemonForNydus(sandbox)
+	if err != nil {
+		return err
 	}
-	q, _ := sandbox.hypervisor.(*qemu)
-	if err := q.virtiofsDaemon.Umount(rafsMountPath(c.id)); err != nil {
+	if err := virtiofsDaemon.Umount(rafsMountPath(c.id)); err != nil {
 		return errors.Wrap(err, "umount rafs failed")
 	}
 	if err := bindUnmountContainerSnapshotDir(ctx, sharedDir, c.id); err != nil {


@@ -68,7 +68,7 @@ var (
 	errNydusdSockPathInvalid    = errors.New("nydusd sock path is invalid")
 	errNydusdAPISockPathInvalid = errors.New("nydusd api sock path is invalid")
 	errNydusdSourcePathInvalid  = errors.New("nydusd resource path is invalid")
-	errNydusdNotSupport         = errors.New("nydusd only supports the QEMU hypervisor currently (see https://github.com/kata-containers/kata-containers/issues/2724)")
+	errNydusdNotSupport         = errors.New("nydusd only supports the QEMU/CLH hypervisors currently (see https://github.com/kata-containers/kata-containers/issues/3654)")
 )
 type nydusd struct {