mirror of https://github.com/kata-containers/kata-containers.git
runtime: add initdata support in clh
Prepare the initdata image and mount it as a block device.

Signed-off-by: Saul Paredes <saulparedes@microsoft.com>
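The helper prepareInitdataMount that this commit calls is defined elsewhere in the runtime. For orientation only, here is a minimal, hypothetical sketch of the general idea — serialize the initdata document into a raw image file that can then be attached read-only as a block device. The writeInitdataImage name, the gzip step, and the sector padding are illustrative assumptions; the actual image layout (magic header, length fields) is defined by the initdata spec and is not reproduced here.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"os"
)

const sectorSize = 512

// writeInitdataImage is a hypothetical stand-in for the real preparation
// logic: it gzips the initdata document and writes it to imagePath, padded
// to a whole number of 512-byte sectors so it can back a block device.
func writeInitdataImage(initdata []byte, imagePath string) error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(initdata); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}

	// Pad to the next sector boundary; block devices are sector-granular.
	if pad := buf.Len() % sectorSize; pad != 0 {
		buf.Write(make([]byte, sectorSize-pad))
	}
	return os.WriteFile(imagePath, buf.Bytes(), 0o600)
}

func main() {
	doc := []byte("algorithm = \"sha384\"\n") // illustrative initdata fragment
	if err := writeInitdataImage(doc, "/tmp/initdata.img"); err != nil {
		panic(err)
	}
}
```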
@@ -686,6 +686,25 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 		return err
 	}
 
+	if err := setupInitdata(clh, hypervisorConfig); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// setupInitdata prepares and attaches the initdata disk if present.
+func setupInitdata(clh *cloudHypervisor, hypervisorConfig *HypervisorConfig) error {
+	if len(hypervisorConfig.Initdata) == 0 {
+		return nil
+	}
+
+	if err := prepareInitdataMount(clh.Logger(), clh.id, hypervisorConfig); err != nil {
+		return err
+	}
+
+	clh.addInitdataDisk(hypervisorConfig.InitdataImage)
+
 	return nil
 }
 
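Note that setupInitdata is a cold-plug path: the disk is appended to clh.vmconfig before the VM is started, in contrast to the hotplugAddBlockDevice path shown in the next hunk. The length check also makes the function a strict no-op when no initdata is configured. A sketch of an in-package unit test for that contract — the test name and assertions are illustrative, not part of the commit:

```go
// Illustrative in-package test: empty Initdata must be a no-op.
func TestSetupInitdataEmptyIsNoop(t *testing.T) {
	clh := &cloudHypervisor{}
	cfg := &HypervisorConfig{} // Initdata left empty

	if err := setupInitdata(clh, cfg); err != nil {
		t.Fatalf("expected nil error for empty initdata, got %v", err)
	}
	if clh.vmconfig.Disks != nil {
		t.Fatal("no disk should be queued when initdata is empty")
	}
}
```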
||||||
@@ -853,6 +872,44 @@ func clhPciInfoToPath(pciInfo chclient.PciDeviceInfo) (types.PciPath, error)
 	return types.PciPathFromString(tokens[0])
 }
 
+// addInitdataDisk attaches initdataImage to the CLH VM as a read-only virtio-blk disk.
+// It builds a DiskConfig (Readonly=true, VhostUser=false), sets one queue per vCPU
+// with queue size 1024, applies direct-I/O, IOMMU and rate-limiter settings from
+// clh.config, and appends the disk to the pending VM config (no hotplug).
+func (clh *cloudHypervisor) addInitdataDisk(initdataImage string) {
+	disk := chclient.NewDiskConfig()
+	disk.Path = &initdataImage
+
+	ro := true
+	disk.Readonly = &ro
+
+	// Use virtio-blk
+	vu := false
+	disk.VhostUser = &vu
+
+	// Reasonable queue counts; mirror the hotplug path
+	queues := int32(clh.config.NumVCPUs())
+	qsz := int32(1024)
+	disk.NumQueues = &queues
+	disk.QueueSize = &qsz
+
+	// Honor runtime settings
+	if clh.config.BlockDeviceCacheSet {
+		disk.Direct = &clh.config.BlockDeviceCacheDirect
+	}
+	disk.SetIommu(clh.config.IOMMU)
+
+	if rl := clh.getDiskRateLimiterConfig(); rl != nil {
+		disk.SetRateLimiterConfig(*rl)
+	}
+
+	if clh.vmconfig.Disks != nil {
+		*clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
+	} else {
+		clh.vmconfig.Disks = &[]chclient.DiskConfig{*disk}
+	}
+}
+
 func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) error {
 	if drive.Swap {
 		return fmt.Errorf("cloudHypervisor doesn't support swap")
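For context, DiskConfig comes from the OpenAPI-generated Cloud Hypervisor client, and the struct queued by addInitdataDisk is serialized into the JSON body of the VM creation request. A standalone sketch of what that fragment looks like — the image path is a placeholder, the import path is assumed to be the generated client package clh.go uses, and the field values mirror the diff above:

```go
package main

import (
	"encoding/json"
	"fmt"

	chclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cloud-hypervisor/client"
)

func main() {
	// Placeholder path; at runtime this is hypervisorConfig.InitdataImage.
	path := "/run/kata-containers/shared/initdata/<sandbox-id>/initdata.img"
	ro, vu := true, false
	queues, qsz := int32(4), int32(1024)

	disk := chclient.NewDiskConfig()
	disk.Path = &path
	disk.Readonly = &ro
	disk.VhostUser = &vu
	disk.NumQueues = &queues
	disk.QueueSize = &qsz

	// Print the JSON fragment CLH would receive for this disk.
	out, err := json.MarshalIndent(disk, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```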
@@ -1810,6 +1867,15 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 		}).Debug("successfully removed the non root user")
 	}
 
+	// If we have initdata, we should drop the initdata image path
+	hypervisorConfig := clh.HypervisorConfig()
+	if len(hypervisorConfig.Initdata) > 0 {
+		initdataWorkdir := filepath.Join(string(filepath.Separator), "/run/kata-containers/shared/initdata", clh.id)
+		if err := os.RemoveAll(initdataWorkdir); err != nil {
+			clh.Logger().WithError(err).Warnf("failed to remove initdata work dir %s", initdataWorkdir)
+		}
+	}
+
 	clh.reset()
 
 	return nil
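Two small design points in this cleanup hunk: removal of the initdata workdir is best-effort — a failure is logged via Warnf rather than returned, so it cannot mask the primary teardown path in cleanupVM — and the workdir is keyed by clh.id, so each sandbox's initdata image is removed independently of any others.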