Merge pull request #68491 from leakingtapan/golint-fix-volume-util
fix golint issue for pkg/volume/util
This commit is contained in commit 2119512b9e.
pkg/volume/util is removed from the golint exception list in hack/.golint_failures, so the package must now pass golint cleanly:

hack/.golint_failures
@@ -423,7 +423,6 @@ pkg/volume/scaleio
 pkg/volume/secret
 pkg/volume/storageos
 pkg/volume/testing
-pkg/volume/util
 pkg/volume/util/fs
 pkg/volume/util/recyclerclient
 pkg/volume/util/volumepathhandler
pkg/volume/util/atomic_writer.go
@@ -61,6 +61,7 @@ type AtomicWriter struct {
 	logContext string
 }
 
+// FileProjection contains file Data and access Mode
 type FileProjection struct {
 	Data []byte
 	Mode int32
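The new comment satisfies golint's exported-identifier rule: every exported name needs a doc comment that begins with the name itself (otherwise golint reports, e.g., `exported type FileProjection should have comment or be unexported`). A minimal sketch of the rule on a hypothetical type, not from this commit:

// Package sketch illustrates the golint doc-comment convention.
package sketch

// Widget holds widget state. golint requires the comment on an exported
// identifier to begin with the identifier's name; a comment phrased as
// "A container for widget state." would be reported as
//   comment on exported type Widget should be of the form "Widget ..."
type Widget struct {
	Name string
}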
@@ -172,9 +173,8 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 	if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
 		glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
 		return err
-	} else {
-		glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
 	}
+	glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
 
 	// (7)
 	if err = w.createUserVisibleFiles(cleanPayload); err != nil {
@@ -222,7 +222,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 	return nil
 }
 
 // validatePayload returns an error if any path in the payload returns a copy of the payload with the paths cleaned.
 func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
 	cleanPayload := make(map[string]FileProjection)
 	for k, content := range payload {
pkg/volume/util/attach_limit.go
@@ -33,7 +33,7 @@ const (
 	// Amazon recommends no more than 40; the system root volume uses at least one.
 	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
 	DefaultMaxEBSVolumes = 39
-	// DefaultMaxEBSM5VolumeLimit is default EBS volume limit on m5 and c5 instances
+	// DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances
 	DefaultMaxEBSNitroVolumeLimit = 25
 	// AzureVolumeLimitKey stores resource name that will store volume limits for Azure
 	AzureVolumeLimitKey = "attachable-volumes-azure-disk"
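For context, these constants feed the volume-limit checks that cap how many disks may attach to a node. A rough sketch of that kind of check; canAttachEBS and its argument are illustrative stand-ins, not the scheduler's actual API:

package sketch

// DefaultMaxEBSVolumes mirrors the constant above.
const DefaultMaxEBSVolumes = 39

// canAttachEBS is a simplified stand-in for the node-side limit check:
// another EBS volume may attach only while the node is under its cap.
func canAttachEBS(attachedEBSVolumes int) bool {
	return attachedEBSVolumes < DefaultMaxEBSVolumes
}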
pkg/volume/util/device_util.go
@@ -25,10 +25,10 @@ type DeviceUtil interface {
 }
 
 type deviceHandler struct {
-	get_io IoUtil
+	getIo IoUtil
 }
 
 //NewDeviceHandler Create a new IoHandler implementation
 func NewDeviceHandler(io IoUtil) DeviceUtil {
-	return &deviceHandler{get_io: io}
+	return &deviceHandler{getIo: io}
 }
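The rename from get_io to getIo clears golint's naming check, which reports underscores in identifiers (here roughly `don't use underscores in Go names; struct field get_io should be getIo`). The shape of the fix in miniature, with a stub interface standing in for IoUtil:

package sketch

// ioStub stands in for the package's IoUtil interface.
type ioStub interface {
	ReadFile(filename string) ([]byte, error)
}

// golint flags the underscore form:
//   type deviceHandler struct { get_io ioStub }
// and accepts the MixedCaps form this commit adopts:
type deviceHandler struct {
	getIo ioStub
}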
pkg/volume/util/device_util_linux.go
@@ -29,7 +29,7 @@ import (
 
 // FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent
 func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
-	io := handler.get_io
+	io := handler.getIo
 	disk, err := findDeviceForPath(device, io)
 	if err != nil {
 		return ""
@@ -68,7 +68,7 @@ func findDeviceForPath(path string, io IoUtil) (string, error) {
 // which are managed by the devicemapper dm-1.
 func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
 	var devices []string
-	io := handler.get_io
+	io := handler.getIo
 	// Split path /dev/dm-1 into "", "dev", "dm-1"
 	parts := strings.Split(dm, "/")
 	if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
@@ -92,7 +92,7 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
 // }
 func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
 	portalHostMap := make(map[string]int)
-	io := handler.get_io
+	io := handler.getIo
 
 	// Iterate over all the iSCSI hosts in sysfs
 	sysPath := "/sys/class/iscsi_host"
@@ -205,7 +205,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (
 // corresponding to that LUN.
 func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
 	devices := make([]string, 0)
-	io := handler.get_io
+	io := handler.getIo
 
 	// Iterate over all the iSCSI hosts in sysfs
 	sysPath := "/sys/class/iscsi_host"
pkg/volume/util/doc.go
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Contains utility code for use by volume plugins.
+// Package util contains utility code for use by volume plugins.
 package util // import "k8s.io/kubernetes/pkg/volume/util"
pkg/volume/util/error.go
@@ -20,7 +20,7 @@ import (
 	k8stypes "k8s.io/apimachinery/pkg/types"
 )
 
-// This error on attach indicates volume is attached to a different node
+// DanglingAttachError indicates volume is attached to a different node
 // than we expected.
 type DanglingAttachError struct {
 	msg string
@@ -32,6 +32,7 @@ func (err *DanglingAttachError) Error() string {
 	return err.msg
 }
 
+// NewDanglingError create a new dangling error
 func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error {
 	return &DanglingAttachError{
 		msg: msg,
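Callers distinguish a dangling attach from other failures with a type assertion on the returned error. A self-contained sketch, reconstructing only the parts of the type the diff shows (the real struct also stores the node and device path passed to NewDanglingError):

package sketch

import "fmt"

// DanglingAttachError mirrors the shape shown above: an error meaning the
// volume is attached to a different node than expected.
type DanglingAttachError struct {
	msg string
}

func (err *DanglingAttachError) Error() string { return err.msg }

// handleAttachError shows the caller-side pattern: treat a dangling
// attach specially, fall through for every other attach failure.
func handleAttachError(err error) {
	if dangling, ok := err.(*DanglingAttachError); ok {
		fmt.Printf("volume attached elsewhere: %v\n", dangling)
		return
	}
	fmt.Printf("attach failed: %v\n", err)
}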
pkg/volume/util/finalizer.go
@@ -17,9 +17,9 @@ limitations under the License.
 package util
 
 const (
-	// Name of finalizer on PVCs that have a running pod.
+	// PVCProtectionFinalizer is the name of finalizer on PVCs that have a running pod.
 	PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
 
-	// Name of finalizer on PVs that are bound by PVCs
+	// PVProtectionFinalizer is the name of finalizer on PVs that are bound by PVCs
 	PVProtectionFinalizer = "kubernetes.io/pv-protection"
 )
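These finalizers are what the PVC/PV protection controllers consult before letting deletion proceed: an object is held as long as the finalizer remains in its metadata. A minimal sketch of the membership check involved; the helper name is hypothetical:

package sketch

// PVCProtectionFinalizer matches the constant defined above.
const PVCProtectionFinalizer = "kubernetes.io/pvc-protection"

// hasProtectionFinalizer reports whether a finalizer list (as found in
// ObjectMeta.Finalizers) still carries the PVC protection finalizer.
func hasProtectionFinalizer(finalizers []string) bool {
	for _, f := range finalizers {
		if f == PVCProtectionFinalizer {
			return true
		}
	}
	return false
}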
pkg/volume/util/util.go
@@ -261,6 +261,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
 	return secret, nil
 }
 
+// GetClassForVolume locates storage class by persistent volume
 func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
 	if kubeClient == nil {
 		return nil, fmt.Errorf("Cannot get kube client")
@@ -379,11 +380,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone
 		}
 		// scheduler will guarantee if node != null above, zoneFromNode is member of allowedZones.
 		// so if zoneFromNode != "", we can safely assume it is part of allowedZones.
-		if zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas); err != nil {
+		zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas)
+		if err != nil {
 			return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err)
-		} else {
-			return zones, nil
 		}
+		return zones, nil
 	}
 
 	// pick zone from parameters if present
@@ -405,11 +406,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone
 	// pick zone from zones with nodes
 	if zonesWithNodes.Len() > 0 {
 		// If node != null (and thus zoneFromNode != ""), zoneFromNode will be member of zonesWithNodes
-		if zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas); err != nil {
+		zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas)
+		if err != nil {
 			return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err)
-		} else {
-			return zones, nil
 		}
+		return zones, nil
 	}
 	return nil, fmt.Errorf("cannot determine zones to provision volume in")
 }
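Three hunks in this commit (AtomicWriter.Write and the two SelectZonesForVolume blocks above) apply the same golint suggestion, `if block ends with a return statement, so drop this else and outdent its block`. Hoisting the call out of the if initializer also widens the scope of zones so the success path can return it after the error check. The transformation in isolation, with compute as a hypothetical stand-in for chooseZonesForVolumeIncludingZone:

package sketch

import "fmt"

func compute() (string, error) { return "zone-a", nil }

// before: golint flags the else arm as redundant, since the if arm returns.
func beforeFix() (string, error) {
	if v, err := compute(); err != nil {
		return "", fmt.Errorf("cannot process zones: %v", err)
	} else {
		return v, nil
	}
}

// after: error handling stays indented, the happy path reads straight down.
func afterFix() (string, error) {
	v, err := compute()
	if err != nil {
		return "", fmt.Errorf("cannot process zones: %v", err)
	}
	return v, nil
}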
@@ -431,6 +432,7 @@ func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (se
 	return zones, nil
 }
 
+// ZonesSetToLabelValue converts zones set to label value
 func ZonesSetToLabelValue(strSet sets.String) string {
 	return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
 }
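For reference, the function joins zone names into a single label value using kubeletapis.LabelMultiZoneDelimiter, assumed here to be the string "__" (worth verifying against the tree at this commit). A sketch of the effect with a plain slice standing in for sets.String:

package sketch

import (
	"sort"
	"strings"
)

// labelMultiZoneDelimiter assumes kubeletapis.LabelMultiZoneDelimiter == "__".
const labelMultiZoneDelimiter = "__"

// zonesToLabelValue reproduces the function's effect; the real code uses
// sets.String.UnsortedList, which gives no ordering guarantee, so the
// sort here is only for a stable example.
func zonesToLabelValue(zones []string) string {
	sort.Strings(zones)
	return strings.Join(zones, labelMultiZoneDelimiter)
}

// zonesToLabelValue([]string{"us-east-1b", "us-east-1a"})
// returns "us-east-1a__us-east-1b".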
@@ -511,7 +513,7 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
 func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
 	roundedUp := volumeSizeBytes / allocationUnitBytes
 	if volumeSizeBytes%allocationUnitBytes > 0 {
-		roundedUp += 1
+		roundedUp++
 	}
 	return roundedUp
 }
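The `roundedUp += 1` to `roundedUp++` change is golint's `should replace roundedUp += 1 with roundedUp++` suggestion; behavior is unchanged. RoundUpSize itself is integer ceiling division: it reports how many allocation units are needed to hold volumeSizeBytes. A runnable worked example:

package main

import "fmt"

// RoundUpSize as it stands after this commit.
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	roundedUp := volumeSizeBytes / allocationUnitBytes
	if volumeSizeBytes%allocationUnitBytes > 0 {
		roundedUp++
	}
	return roundedUp
}

func main() {
	const GiB = 1024 * 1024 * 1024
	fmt.Println(RoundUpSize(1500*1000*1000, GiB)) // 1.5 GB -> 2 GiB units
	fmt.Println(RoundUpSize(2*GiB, GiB))          // exact multiple -> 2
}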
pkg/volume/util/util_test.go
@@ -44,7 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 )
 
-var nodeLabels map[string]string = map[string]string{
+var nodeLabels = map[string]string{
 	"test-key1": "test-value1",
 	"test-key2": "test-value2",
 }
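Finally, the test-file change clears golint's redundant-type report, roughly `should omit type map[string]string from declaration of var nodeLabels; it will be inferred from the right-hand side`. In miniature:

package sketch

// golint flags the explicit type:
//   var nodeLabels map[string]string = map[string]string{ ... }
// and accepts the inferred form:
var nodeLabels = map[string]string{
	"test-key1": "test-value1",
	"test-key2": "test-value2",
}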