Rename pdName -> volumeId for AWS persistent volumes
commit 95b68ae0b0 (parent aa6051026e)
@@ -401,20 +401,17 @@ type ISCSIVolumeSource struct {
 // The disk must also be in the same AWS zone as the kubelet.
 // A AWS EBS disk can only be mounted as read/write once.
 type AWSPersistentDiskVolumeSource struct {
-	// Unique name of the PD resource. Used to identify the disk in AWS
-	PDName string `json:"pdName"`
+	// Unique id of the persistent disk resource. Used to identify the disk in AWS
+	VolumeId string `json:"volumeId"`
 	// Required: Filesystem type to mount.
 	// Must be a filesystem type supported by the host operating system.
 	// Ex. "ext4", "xfs", "ntfs"
 	// TODO: how do we prevent errors in the filesystem from compromising the machine
 	FSType string `json:"fsType,omitempty"`
 	// Optional: Partition on the disk to mount.
 	// If omitted, kubelet will attempt to mount the device name.
 	// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
	Partition int `json:"partition,omitempty"`
 	// Optional: Defaults to false (read/write). ReadOnly here will force
 	// the ReadOnly setting in VolumeMounts.
 	ReadOnly bool `json:"readOnly,omitempty"`
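For orientation, this is how a caller would populate the renamed field after this change. A minimal sketch assuming the repository's api package; the volume name, the volume ID value, and the aws://<zone>/<volume-id> form are illustrative assumptions (the form is only inferred from the path handling further down):

	// Sketch only: field names come from the struct above; the volume ID value
	// and the aws://<zone>/<id> format are illustrative assumptions.
	vol := api.Volume{
		Name: "testpd",
		VolumeSource: api.VolumeSource{
			AWSPersistentDisk: &api.AWSPersistentDiskVolumeSource{
				VolumeId: "aws://us-east-1a/vol-0123", // previously PDName / json tag "pdName"
				FSType:   "ext4",
				ReadOnly: false,
			},
		},
	}
	_ = vol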
@@ -314,8 +314,8 @@ type ISCSIVolumeSource struct {
 // The disk must also be in the same AWS zone as the kubelet.
 // A AWS PD can only be mounted on a single machine.
 type AWSPersistentDiskVolumeSource struct {
-	// Unique name of the PD resource. Used to identify the disk in AWS
-	PDName string `json:"pdName" description:"unique id of the PD resource in AWS"`
+	// Unique id of the PD resource. Used to identify the disk in AWS
+	VolumeId string `json:"volumeId" description:"unique id of the PD resource in AWS"`
 	// Required: Filesystem type to mount.
 	// Must be a filesystem type supported by the host operating system.
 	// Ex. "ext4", "xfs", "ntfs"
@@ -296,8 +296,8 @@ type GCEPersistentDiskVolumeSource struct {
 // The disk must also be in the same AWS zone as the kubelet.
 // A AWS PD can only be mounted on a single machine.
 type AWSPersistentDiskVolumeSource struct {
-	// Unique name of the PD resource. Used to identify the disk in AWS
-	PDName string `json:"pdName" description:"unique id of the PD resource in AWS"`
+	// Unique id of the PD resource. Used to identify the disk in AWS
+	VolumeId string `json:"volumeId" description:"unique id of the PD resource in AWS"`
 	// Required: Filesystem type to mount.
 	// Must be a filesystem type supported by the host operating system.
 	// Ex. "ext4", "xfs", "ntfs"
@@ -412,8 +412,8 @@ type GCEPersistentDiskVolumeSource struct {
 // The disk must also be in the same AWS zone as the kubelet.
 // A AWS PD can only be mounted on a single machine.
 type AWSPersistentDiskVolumeSource struct {
-	// Unique name of the PD resource. Used to identify the disk in AWS
-	PDName string `json:"pdName" description:"unique id of the PD resource in AWS"`
+	// Unique id of the PD resource. Used to identify the disk in AWS
+	VolumeId string `json:"volumeId" description:"unique id of the PD resource in AWS"`
 	// Required: Filesystem type to mount.
 	// Must be a filesystem type supported by the host operating system.
 	// Ex. "ext4", "xfs", "ntfs"
@@ -374,8 +374,8 @@ func validateGCEPersistentDiskVolumeSource(PD *api.GCEPersistentDiskVolumeSource
 
 func validateAWSPersistentDiskVolumeSource(PD *api.AWSPersistentDiskVolumeSource) errs.ValidationErrorList {
 	allErrs := errs.ValidationErrorList{}
-	if PD.PDName == "" {
-		allErrs = append(allErrs, errs.NewFieldRequired("pdName"))
+	if PD.VolumeId == "" {
+		allErrs = append(allErrs, errs.NewFieldRequired("volumeId"))
 	}
 	if PD.FSType == "" {
 		allErrs = append(allErrs, errs.NewFieldRequired("fsType"))
@@ -62,12 +62,12 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
 		}
 	}
 	if volume.AWSPersistentDisk != nil {
-		pdName := volume.AWSPersistentDisk.PDName
+		volumeId := volume.AWSPersistentDisk.VolumeId
 
 		manifest := &(pod.Spec)
 		for ix := range manifest.Volumes {
 			if manifest.Volumes[ix].AWSPersistentDisk != nil &&
-				manifest.Volumes[ix].AWSPersistentDisk.PDName == pdName {
+				manifest.Volumes[ix].AWSPersistentDisk.VolumeId == volumeId {
 				return true
 			}
 		}
@@ -327,7 +327,7 @@ func TestAWSDiskConflicts(t *testing.T) {
 		{
 			VolumeSource: api.VolumeSource{
 				AWSPersistentDisk: &api.AWSPersistentDiskVolumeSource{
-					PDName: "foo",
+					VolumeId: "foo",
 				},
 			},
 		},
@@ -338,7 +338,7 @@ func TestAWSDiskConflicts(t *testing.T) {
 		{
 			VolumeSource: api.VolumeSource{
 				AWSPersistentDisk: &api.AWSPersistentDiskVolumeSource{
-					PDName: "bar",
+					VolumeId: "bar",
 				},
 			},
 		},
@@ -77,7 +77,7 @@ func (plugin *awsPersistentDiskPlugin) NewBuilder(spec *api.Volume, podRef *api.
 }
 
 func (plugin *awsPersistentDiskPlugin) newBuilderInternal(spec *api.Volume, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
-	pdName := spec.AWSPersistentDisk.PDName
+	volumeId := spec.AWSPersistentDisk.VolumeId
 	fsType := spec.AWSPersistentDisk.FSType
 	partition := ""
 	if spec.AWSPersistentDisk.Partition != 0 {
@@ -88,7 +88,7 @@ func (plugin *awsPersistentDiskPlugin) newBuilderInternal(spec *api.Volume, podU
 	return &awsPersistentDisk{
 		podUID:    podUID,
 		volName:   spec.Name,
-		pdName:    pdName,
+		volumeId:  volumeId,
 		fsType:    fsType,
 		partition: partition,
 		readOnly:  readOnly,
@@ -128,8 +128,8 @@ type pdManager interface {
 type awsPersistentDisk struct {
 	volName string
 	podUID  types.UID
-	// Unique name of the PD, used to find the disk resource in the provider.
-	pdName string
+	// Unique id of the PD, used to find the disk resource in the provider.
+	volumeId string
 	// Filesystem type, optional.
 	fsType string
 	// Specifies the partition to mount
@@ -183,7 +183,7 @@ func (pd *awsPersistentDisk) SetUpAt(dir string) error {
 		return nil
 	}
 
-	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+	globalPDPath := makeGlobalPDPath(pd.plugin.host, pd.volumeId)
 	if err := pd.manager.AttachAndMountDisk(pd, globalPDPath); err != nil {
 		return err
 	}
@@ -232,14 +232,14 @@ func (pd *awsPersistentDisk) SetUpAt(dir string) error {
 	return nil
 }
 
-func makeGlobalPDName(host volume.VolumeHost, devName string) string {
+func makeGlobalPDPath(host volume.VolumeHost, volumeId string) string {
 	// Clean up the URI to be more fs-friendly
-	name := devName
+	name := volumeId
 	name = strings.Replace(name, "://", "/", -1)
 	return path.Join(host.GetPluginDir(awsPersistentDiskPluginName), "mounts", name)
 }
 
-func getPdNameFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
+func getVolumeIdFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
 	basePath := path.Join(host.GetPluginDir(awsPersistentDiskPluginName), "mounts")
 	rel, err := filepath.Rel(basePath, globalPath)
 	if err != nil {
@@ -248,13 +248,13 @@ func getPdNameFromGlobalMount(host volume.VolumeHost, globalPath string) (string
 	if strings.Contains(rel, "../") {
 		return "", fmt.Errorf("Unexpected mount path: " + globalPath)
 	}
-	// Reverse the :// replacement done in makeGlobalPDName
-	pdName := rel
-	if strings.HasPrefix(pdName, "aws/") {
-		pdName = strings.Replace(pdName, "aws/", "aws://", 1)
+	// Reverse the :// replacement done in makeGlobalPDPath
+	volumeId := rel
+	if strings.HasPrefix(volumeId, "aws/") {
+		volumeId = strings.Replace(volumeId, "aws/", "aws://", 1)
 	}
-	glog.V(2).Info("Mapping mount dir ", globalPath, " to pdName ", pdName)
-	return pdName, nil
+	glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeId ", volumeId)
+	return volumeId, nil
 }
 
 func (pd *awsPersistentDisk) GetPath() string {
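To make the rename's effect on the global mount path concrete, here is a standalone sketch of the round trip these two helpers perform. The plugin directory and volume ID values below are illustrative assumptions, not values taken from this commit:

	package main

	import (
		"fmt"
		"path"
		"path/filepath"
		"strings"
	)

	func main() {
		// Assumed values for illustration only.
		pluginDir := "/var/lib/kubelet/plugins/aws-pd"
		volumeId := "aws://us-east-1a/vol-0123"

		// Forward: as in makeGlobalPDPath, clean "://" out of the id to build an fs-friendly path.
		globalPath := path.Join(pluginDir, "mounts", strings.Replace(volumeId, "://", "/", -1))
		fmt.Println(globalPath) // /var/lib/kubelet/plugins/aws-pd/mounts/aws/us-east-1a/vol-0123

		// Reverse: as in getVolumeIdFromGlobalMount, recover the id by restoring the "aws://" prefix.
		rel, _ := filepath.Rel(path.Join(pluginDir, "mounts"), globalPath)
		recovered := rel
		if strings.HasPrefix(recovered, "aws/") {
			recovered = strings.Replace(recovered, "aws/", "aws://", 1)
		}
		fmt.Println(recovered) // aws://us-east-1a/vol-0123
	}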
@@ -294,14 +294,14 @@ func (pd *awsPersistentDisk) TearDownAt(dir string) error {
 	// If len(refs) is 1, then all bind mounts have been removed, and the
 	// remaining reference is the global mount. It is safe to detach.
 	if len(refs) == 1 {
-		// pd.pdName is not initially set for volume-cleaners, so set it here.
-		pd.pdName, err = getPdNameFromGlobalMount(pd.plugin.host, refs[0])
+		// pd.volumeId is not initially set for volume-cleaners, so set it here.
+		pd.volumeId, err = getVolumeIdFromGlobalMount(pd.plugin.host, refs[0])
 		if err != nil {
-			glog.V(2).Info("Could not determine pdName from mountpoint ", refs[0], ": ", err)
+			glog.V(2).Info("Could not determine volumeId from mountpoint ", refs[0], ": ", err)
 			return err
 		}
 		if err := pd.manager.DetachDisk(pd); err != nil {
-			glog.V(2).Info("Error detaching disk ", pd.pdName, ": ", err)
+			glog.V(2).Info("Error detaching disk ", pd.volumeId, ": ", err)
 			return err
 		}
 	}
@@ -72,7 +72,7 @@ type fakePDManager struct{}
 
 // TODO(jonesdl) To fully test this, we could create a loopback device
 // and mount that instead.
 func (fake *fakePDManager) AttachAndMountDisk(pd *awsPersistentDisk, globalPDPath string) error {
-	globalPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+	globalPath := makeGlobalPDPath(pd.plugin.host, pd.volumeId)
 	err := os.MkdirAll(globalPath, 0750)
 	if err != nil {
 		return err
@@ -81,7 +81,7 @@ func (fake *fakePDManager) AttachAndMountDisk(pd *awsPersistentDisk, globalPDPat
 }
 
 func (fake *fakePDManager) DetachDisk(pd *awsPersistentDisk) error {
-	globalPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+	globalPath := makeGlobalPDPath(pd.plugin.host, pd.volumeId)
 	err := os.RemoveAll(globalPath)
 	if err != nil {
 		return err
@@ -101,8 +101,8 @@ func TestPlugin(t *testing.T) {
 		Name: "vol1",
 		VolumeSource: api.VolumeSource{
 			AWSPersistentDisk: &api.AWSPersistentDiskVolumeSource{
-				PDName: "pd",
-				FSType: "ext4",
+				VolumeId: "pd",
+				FSType:   "ext4",
 			},
 		},
 	}
@@ -40,7 +40,7 @@ func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsPersistentDisk, globalPDPath
 	if pd.readOnly {
 		flags = mount.FlagReadOnly
 	}
-	devicePath, err := volumes.AttachDisk("", pd.pdName, pd.readOnly)
+	devicePath, err := volumes.AttachDisk("", pd.volumeId, pd.readOnly)
 	if err != nil {
 		return err
 	}
@@ -89,7 +89,7 @@ func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsPersistentDisk, globalPDPath
 // Unmounts the device and detaches the disk from the kubelet's host machine.
 func (util *AWSDiskUtil) DetachDisk(pd *awsPersistentDisk) error {
 	// Unmount the global PD mount, which should be the only one.
-	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
+	globalPDPath := makeGlobalPDPath(pd.plugin.host, pd.volumeId)
 	if err := pd.mounter.Unmount(globalPDPath, 0); err != nil {
 		glog.V(2).Info("Error unmount dir ", globalPDPath, ": ", err)
 		return err
@@ -101,11 +101,11 @@ func (util *AWSDiskUtil) DetachDisk(pd *awsPersistentDisk) error {
 	// Detach the disk
 	volumes, err := pd.getVolumeProvider()
 	if err != nil {
-		glog.V(2).Info("Error getting volume provider for pd ", pd.pdName, ": ", err)
+		glog.V(2).Info("Error getting volume provider for volumeId ", pd.volumeId, ": ", err)
 		return err
 	}
-	if err := volumes.DetachDisk("", pd.pdName); err != nil {
-		glog.V(2).Info("Error detaching disk ", pd.pdName, ": ", err)
+	if err := volumes.DetachDisk("", pd.volumeId); err != nil {
+		glog.V(2).Info("Error detaching disk ", pd.volumeId, ": ", err)
 		return err
 	}
 	return nil
@@ -113,7 +113,7 @@ func (util *AWSDiskUtil) DetachDisk(pd *awsPersistentDisk) error {
 
 // safe_format_and_mount is a utility script on AWS VMs that probes a persistent disk, and if
 // necessary formats it before mounting it.
-// This eliminates the necesisty to format a PD before it is used with a Pod on AWS.
+// This eliminates the necessity to format a PD before it is used with a Pod on AWS.
 // TODO: port this script into Go and use it for all Linux platforms
 type awsSafeFormatAndMount struct {
 	mount.Interface
@@ -274,7 +274,7 @@ func testPDPod(diskName, targetHost string, readOnly bool) *api.Pod {
 		Name: "testpd",
 		VolumeSource: api.VolumeSource{
 			AWSPersistentDisk: &api.AWSPersistentDiskVolumeSource{
-				PDName:   diskName,
+				VolumeId: diskName,
 				FSType:   "ext4",
 				ReadOnly: readOnly,
 			},