Typos and englishify pkg/volume
parent 158dc1a863
commit f0988b95e7
@@ -66,7 +66,7 @@ func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
 	}, nil
 }
 
-// Attach attaches a volume.Spec to a Azure VM referenced by NodeName, returning the disk's LUN
+// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
 func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
 	volumeSource, err := getVolumeSource(spec)
 	if err != nil {
@@ -67,7 +67,7 @@ func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
 }
 
 // Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted,
-// it mounts it it to globalPDPath.
+// it mounts it to globalPDPath.
 // We create a dummy directory (="device") and bind-mount it to globalPDPath
 func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
 	globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
@@ -31,7 +31,7 @@ import (
 type CinderDiskUtil struct{}
 
 // Attaches a disk specified by a volume.CinderPersistenDisk to the current kubelet.
-// Mounts the disk to it's global path.
+// Mounts the disk to its global path.
 func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
 	options := []string{}
 	if b.readOnly {
@@ -218,7 +218,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
 	}
 	physicalMounter.ResetLog()
 
-	// Make a unmounter for the volume
+	// Make an unmounter for the volume
 	teardownMedium := mediumUnknown
 	if config.medium == api.StorageMediumMemory {
 		teardownMedium = mediumMemory
@@ -61,7 +61,7 @@ func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
 		return "", err
 	}
 
-	// TargetWWNs are the FibreChannel target world wide names
+	// TargetWWNs are the FibreChannel target worldwide names
 	return fmt.Sprintf("%v", volumeSource.TargetWWNs), nil
 }
 
@@ -218,7 +218,7 @@ func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
 }
 
 // Calls "udevadm trigger --action=change" on the specified drive.
-// drivePath must be the the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
+// drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
 // This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
 func udevadmChangeToDrive(drivePath string) error {
 	glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)
@@ -290,7 +290,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 
 	// adding log-level ERROR to remove noise
 	// and more specific log path so each pod has
-	// it's own log based on PV + Pod
+	// its own log based on PV + Pod
 	log := path.Join(p, b.pod.Name+"-glusterfs.log")
 	options = append(options, "log-level=ERROR")
 	options = append(options, "log-file="+log)
@@ -313,7 +313,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 	}
 
 	// Failed mount scenario.
-	// Since gluster does not return eror text
+	// Since gluster does not return error text
 	// it all goes in a log file, we will read the log file
 	logerror := readGlusterLog(log, b.pod.Name)
 	if logerror != nil {
@@ -80,7 +80,7 @@ func TestRecycler(t *testing.T) {
 	}
 	recycler, err := plug.NewRecycler("pv-name", spec, nil)
 	if err != nil {
-		t.Errorf("Failed to make a new Recyler: %v", err)
+		t.Errorf("Failed to make a new Recycler: %v", err)
 	}
 	if recycler.GetPath() != spec.PersistentVolume.Spec.HostPath.Path {
 		t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.HostPath.Path, recycler.GetPath())
@@ -241,5 +241,5 @@ func getVolumeSource(spec *volume.Spec) (*api.ISCSIVolumeSource, bool, error) {
 		return spec.PersistentVolume.Spec.ISCSI, spec.ReadOnly, nil
 	}
 
-	return nil, false, fmt.Errorf("Spec does not reference a ISCSI volume type")
+	return nil, false, fmt.Errorf("Spec does not reference an ISCSI volume type")
 }
@@ -93,7 +93,7 @@ func TestRecycler(t *testing.T) {
 	}
 	recycler, err := plug.NewRecycler("pv-name", spec, nil)
 	if err != nil {
-		t.Errorf("Failed to make a new Recyler: %v", err)
+		t.Errorf("Failed to make a new Recycler: %v", err)
 	}
 	if recycler.GetPath() != spec.PersistentVolume.Spec.NFS.Path {
 		t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.NFS.Path, recycler.GetPath())
@@ -240,7 +240,7 @@ func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error {
 		options = append(options, "ro")
 	}
 
-	//if a trailling slash is missing we add it here
+	//if a trailing slash is missing we add it here
 	if err := mounter.mounter.Mount(mounter.correctTraillingSlash(mounter.registry), dir, "quobyte", options); err != nil {
 		return fmt.Errorf("quobyte: mount failed: %v", err)
 	}
@@ -390,11 +390,11 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*api.PersistentVolume,
 	}
 
 	if !validateRegistry(provisioner.registry) {
-		return nil, fmt.Errorf("Quoybte registry missing or malformed: must be a host:port pair or multiple pairs seperated by commas")
+		return nil, fmt.Errorf("Quoybte registry missing or malformed: must be a host:port pair or multiple pairs separated by commas")
 	}
 
 	if len(apiServer) == 0 {
-		return nil, fmt.Errorf("Quoybte API server missing or malformed: must be a http(s)://host:port pair or multiple pairs seperated by commas")
+		return nil, fmt.Errorf("Quoybte API server missing or malformed: must be a http(s)://host:port pair or multiple pairs separated by commas")
 	}
 
 	// create random image name
@@ -371,7 +371,7 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
 		if err == nil {
 			return nil
 		} else {
-			glog.Errorf("failed to delete rbd image, error %v ouput %v", err, string(output))
+			glog.Errorf("failed to delete rbd image, error %v output %v", err, string(output))
 		}
 	}
 	return err
@@ -96,7 +96,7 @@ const (
 // 5. The payload is written to the new timestamped directory
 // 6. Symlinks and directory for new user-visible files are created (if needed).
 //
-// For example consider the files:
+// For example, consider the files:
 // <target-dir>/podName
 // <target-dir>/user/labels
 // <target-dir>/k8s/annotations
@@ -97,7 +97,7 @@ type OperationExecutor interface {
 	// If the volume is found, the actual state of the world is updated to mark
 	// the volume as attached.
 	// If the volume does not implement the attacher interface, it is assumed to
-	// be attached and the the actual state of the world is updated accordingly.
+	// be attached and the actual state of the world is updated accordingly.
 	// If the volume is not found or there is an error (fetching the node
 	// object, for example) then an error is returned which triggers exponential
 	// back off on retries.
@@ -1006,7 +1006,7 @@ func (oe *operationExecutor) generateVerifyControllerAttachedVolumeFunc(
 	return func() error {
 		if !volumeToMount.PluginIsAttachable {
 			// If the volume does not implement the attacher interface, it is
-			// assumed to be attached and the the actual state of the world is
+			// assumed to be attached and the actual state of the world is
 			// updated accordingly.
 
 			addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(