Merge pull request #81892 from praseodym/fix-staticcheck-pkg/volume

Fix staticcheck failures for pkg/volume/...
Kubernetes Prow Robot 2020-04-09 20:03:48 -07:00 committed by GitHub
commit 3d350d86d6
28 changed files with 38 additions and 114 deletions


@@ -15,19 +15,6 @@ pkg/registry/core/service/ipallocator
 pkg/registry/core/service/portallocator
 pkg/registry/core/service/storage
 pkg/util/coverage
-pkg/volume
-pkg/volume/azure_dd
-pkg/volume/csi
-pkg/volume/flexvolume
-pkg/volume/iscsi
-pkg/volume/local
-pkg/volume/quobyte
-pkg/volume/rbd
-pkg/volume/storageos
-pkg/volume/util
-pkg/volume/util/fsquota
-pkg/volume/util/fsquota/common
-pkg/volume/util/subpath
 test/e2e/apps
 test/e2e/autoscaling
 test/integration/examples


@@ -65,7 +65,6 @@ var (
 	// only for Windows node
 	winDiskNumRE = regexp.MustCompile(`/dev/disk(.+)`)
-	winDiskNumFormat = "/dev/disk%d"
 )

 func getPath(uid types.UID, volName string, host volume.VolumeHost) string {


@@ -30,6 +30,8 @@ import (
 	"k8s.io/utils/mount"
 )

+var winDiskNumFormat = "/dev/disk%d"
+
 func scsiHostRescan(io ioHandler, exec utilexec.Interface) {
 	cmd := "Update-HostStorageCache"
 	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
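The two azure_dd hunks above move `winDiskNumFormat` from the shared source file into the Windows-only one. staticcheck analyzes one build configuration at a time, so a package-level variable whose only reference sits behind a Windows build tag likely looked unused (its U1000 check) when the tool ran on Linux. A minimal sketch of the fixed layout, with a hypothetical package and consumer function:

```go
// disk_windows.go — compiled only on Windows via its build tag.
// +build windows

package azuredisk

import "fmt"

// Declared next to its only consumer, so staticcheck runs on other
// platforms never see an apparently-unused package-level variable.
var winDiskNumFormat = "/dev/disk%d"

// diskPath is a hypothetical helper standing in for the real callers.
func diskPath(n int) string {
	return fmt.Sprintf(winDiskNumFormat, n)
}
```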


@@ -94,7 +94,6 @@ type csiBlockMapper struct {
 	readOnly   bool
 	spec       *volume.Spec
 	podUID     types.UID
-	volumeInfo map[string]string
 }

 var _ volume.BlockVolumeMapper = &csiBlockMapper{}


@@ -53,40 +53,6 @@ func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T
 	return csiMapper, spec, pv, nil
 }

-func prepareBlockUnmapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) {
-	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
-	pv := makeTestPV(specVolumeName, 10, testDriver, testVol)
-	spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
-
-	// save volume data
-	dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
-	if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
-		t.Errorf("failed to create dir [%s]: %v", dir, err)
-	}
-
-	if err := saveVolumeData(
-		dir,
-		volDataFileName,
-		map[string]string{
-			volDataKey.specVolID:  pv.ObjectMeta.Name,
-			volDataKey.driverName: testDriver,
-			volDataKey.volHandle:  testVol,
-		},
-	); err != nil {
-		t.Fatalf("failed to save volume data: %v", err)
-	}
-
-	unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
-	if err != nil {
-		t.Fatalf("failed to make a new Unmapper: %v", err)
-	}
-
-	csiUnmapper := unmapper.(*csiBlockMapper)
-	csiUnmapper.csiClient = setupClient(t, true)
-
-	return csiUnmapper, spec, pv, nil
-}
-
 func TestBlockMapperGetGlobalMapPath(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
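The deleted `prepareBlockUnmapperTest` helper had no callers anywhere in the package, which is the kind of dead code staticcheck's U1000 ("unused code") check reports; the usual fix, as here, is deletion rather than suppression. A tiny sketch of the diagnostic, with hypothetical names:

```go
package demo

// Used is reachable from an exported entry point, so helper is fine.
func Used() int { return helper() }

func helper() int { return 1 }

// orphan has no callers in the package; staticcheck reports
// "func orphan is unused (U1000)", and the cleanest fix is deletion.
func orphan() int { return 2 }
```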


@@ -23,7 +23,6 @@ import (
 	"io"
 	"net"
 	"sync"
-	"time"

 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc"
@@ -104,12 +103,6 @@ type nodeV1ClientCreator func(addr csiAddr) (
 	err error,
 )

-const (
-	initialDuration = 1 * time.Second
-	factor          = 2.0
-	steps           = 5
-)
-
 // newV1NodeClient creates a new NodeClient with the internally used gRPC
 // connection set up. It also returns a closer which must to be called to close
 // the gRPC connection when the NodeClient is not used anymore.


@@ -81,6 +81,9 @@ func (c *fakeCsiDriverClient) NodeGetVolumeStats(ctx context.Context, volID stri
 		VolumePath: targetPath,
 	}
 	resp, err := c.nodeClient.NodeGetVolumeStats(ctx, req)
+	if err != nil {
+		return nil, err
+	}
 	usages := resp.GetUsage()
 	metrics := &volume.Metrics{}
 	if usages == nil {
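The fake client gains the error check staticcheck noticed was missing: `err` was assigned but never read (its SA4006 check, "value is never used"), and using `resp` without consulting `err` risks operating on a nil response. A reduced sketch of the pattern, with hypothetical types:

```go
package demo

import "errors"

type resp struct{ used int64 }

// call is a hypothetical stand-in for the gRPC client method.
func call(fail bool) (*resp, error) {
	if fail {
		return nil, errors.New("rpc failed")
	}
	return &resp{used: 4096}, nil
}

// Before: `r, err := call(...)` with err never read (SA4006), and r
// possibly nil on failure. After: propagate the error first.
func usedBytes(fail bool) (int64, error) {
	r, err := call(fail)
	if err != nil {
		return 0, err
	}
	return r.used, nil
}
```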


@@ -73,7 +73,6 @@ type csiMountMgr struct {
 	spec           *volume.Spec
 	pod            *api.Pod
 	podUID         types.UID
-	options        volume.VolumeOptions
 	publishContext map[string]string
 	kubeVolHost    volume.KubeletVolumeHost
 	volume.MetricsProvider


@@ -456,8 +456,8 @@ func TestMounterSetupWithStatusTracking(t *testing.T) {
 			&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
 			volume.VolumeOptions{},
 		)
-		if mounter == nil {
-			t.Fatal("failed to create CSI mounter")
+		if err != nil {
+			t.Fatalf("failed to create CSI mounter: %v", err)
 		}
 		csiMounter := mounter.(*csiMountMgr)


@@ -623,7 +623,7 @@ func TestPluginNewMounter(t *testing.T) {
 			}
 			csiClient, err := csiMounter.csiClientGetter.Get()
 			if csiClient == nil {
-				t.Error("mounter csiClient is nil")
+				t.Errorf("mounter csiClient is nil: %v", err)
 			}
 			if err != nil {
 				t.Fatal(err)
@@ -765,7 +765,7 @@ func TestPluginNewMounterWithInline(t *testing.T) {
 			}
 			csiClient, err := csiMounter.csiClientGetter.Get()
 			if csiClient == nil {
-				t.Error("mounter csiClient is nil")
+				t.Errorf("mounter csiClient is nil: %v", err)
 			}
 			if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
 				t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
@@ -860,7 +860,7 @@ func TestPluginNewUnmounter(t *testing.T) {
 	csiClient, err := csiUnmounter.csiClientGetter.Get()
 	if csiClient == nil {
-		t.Error("mounter csiClient is nil")
+		t.Errorf("mounter csiClient is nil: %v", err)
 	}
 }
@@ -1185,7 +1185,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
 	}
 	csiClient, err := csiMapper.csiClientGetter.Get()
 	if csiClient == nil {
-		t.Error("mapper csiClient is nil")
+		t.Errorf("mapper csiClient is nil: %v", err)
 	}

 	// ensure data file is created
@@ -1248,7 +1248,7 @@ func TestPluginNewUnmapper(t *testing.T) {
 	csiClient, err := csiUnmapper.csiClientGetter.Get()
 	if csiClient == nil {
-		t.Error("unmapper csiClient is nil")
+		t.Errorf("unmapper csiClient is nil: %v", err)
 	}

 	// test loaded vol data


@@ -305,7 +305,8 @@ func TestCSI_VolumeAll(t *testing.T) {
 			go func(spec *volume.Spec, nodeName types.NodeName) {
 				attachID, err := volAttacher.Attach(spec, nodeName)
 				if err != nil {
-					t.Fatalf("csiTest.VolumeAll attacher.Attach failed: %s", err)
+					t.Errorf("csiTest.VolumeAll attacher.Attach failed: %s", err)
+					return
 				}
 				t.Logf("csiTest.VolumeAll got attachID %s", attachID)


@@ -65,6 +65,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
 	_, fs, watcher, prober := initTestEnvironment(t)
 	prober.Probe()
 	events, err := prober.Probe()
+	assert.NoError(t, err)
 	assert.Equal(t, 0, len(events))

 	// Call probe after a file is added. Should return 1 event.


@@ -92,8 +92,6 @@ func TestGetAccessModes(t *testing.T) {
 type fakeDiskManager struct {
 	tmpDir       string
-	attachCalled bool
-	detachCalled bool
 }

 func NewFakeDiskManager() *fakeDiskManager {
@@ -498,11 +496,11 @@ func TestGetISCSICHAP(t *testing.T) {
 		},
 	}
 	for _, testcase := range tests {
-		resultDiscoveryCHAP, err := getISCSIDiscoveryCHAPInfo(testcase.spec)
+		resultDiscoveryCHAP, _ := getISCSIDiscoveryCHAPInfo(testcase.spec)
 		resultSessionCHAP, err := getISCSISessionCHAPInfo(testcase.spec)
 		switch testcase.name {
 		case "no volume":
-			if err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
+			if err == nil || err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
 				t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v",
 					testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP,
 					err, resultDiscoveryCHAP, resultSessionCHAP)
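Two independent fixes meet in this hunk: the error from `getISCSIDiscoveryCHAPInfo` was immediately overwritten by the next `:=` and never read (staticcheck SA4006), so it is now discarded explicitly with `_`; and the "no volume" branch gains an `err == nil` guard so that `err.Error()` cannot dereference a nil error if the call unexpectedly succeeds. A reduced sketch with hypothetical helpers:

```go
package demo

import "errors"

func lookupA() (bool, error) { return false, errors.New("no volume") }
func lookupB() (bool, error) { return false, errors.New("no volume") }

func matches(want error) bool {
	// The first call's error is deliberately unused: discard it with _
	// rather than binding err and overwriting it (SA4006).
	a, _ := lookupA()
	b, err := lookupB()
	// Guard err == nil before calling err.Error().
	if err == nil || err.Error() != want.Error() {
		return false
	}
	return !a && !b
}
```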


@@ -156,7 +156,7 @@ func TestWaitForPathToExist(t *testing.T) {
 		t.Errorf("waitForPathToExist: wrong code path called for %s", devicePath[1])
 	}

-	exist = waitForPathToExistInternal(&devicePath[1], 1, "fake_iface", os.Stat, fakeFilepathGlob2)
+	_ = waitForPathToExistInternal(&devicePath[1], 1, "fake_iface", os.Stat, fakeFilepathGlob2)
 	if devicePath[1] != fpath {
 		t.Errorf("waitForPathToExist: wrong code path called for %s", devicePath[1])
 	}


@@ -360,8 +360,7 @@ func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, devic
 	switch fileType {
 	case hostutil.FileTypeBlockDev:
 		// local volume plugin does not implement AttachableVolumePlugin interface, so set devicePath to Path in PV spec directly
-		devicePath = spec.PersistentVolume.Spec.Local.Path
-		return dm.mountLocalBlockDevice(spec, devicePath, deviceMountPath)
+		return dm.mountLocalBlockDevice(spec, spec.PersistentVolume.Spec.Local.Path, deviceMountPath)
 	case hostutil.FileTypeDirectory:
 		// if the given local volume path is of already filesystem directory, return directly
 		return nil


@@ -116,13 +116,13 @@ func TestVolumePluginMgrFunc(t *testing.T) {
 		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}

-	plug, err = vpm.FindPluginBySpec(nil)
+	_, err = vpm.FindPluginBySpec(nil)
 	if err == nil {
 		t.Errorf("Should return error if volume spec is nil")
 	}

 	volumeSpec := &Spec{}
-	plug, err = vpm.FindPluginBySpec(volumeSpec)
+	_, err = vpm.FindPluginBySpec(volumeSpec)
 	if err != nil {
 		t.Errorf("Should return test plugin if volume spec is not nil")
 	}


@@ -465,19 +465,14 @@ func parseAPIConfig(plugin *quobytePlugin, params map[string]string) (*quobyteAP
 	var apiServer, secretName string
 	secretNamespace := "default"
-	deleteKeys := []string{}

 	for k, v := range params {
 		switch gostrings.ToLower(k) {
 		case "adminsecretname":
 			secretName = v
-			deleteKeys = append(deleteKeys, k)
 		case "adminsecretnamespace":
 			secretNamespace = v
-			deleteKeys = append(deleteKeys, k)
 		case "quobyteapiserver":
 			apiServer = v
-			deleteKeys = append(deleteKeys, k)
 		}
 	}
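`deleteKeys` was appended to in every case but never read afterwards, so staticcheck flags the appends as dead work (SA4010, "this result of append is never used"); since the slice served no purpose, the fix deletes the bookkeeping outright. A sketch of the diagnostic, with hypothetical names:

```go
package demo

// Before (flagged): the slice is only ever written.
//
//	seen := []string{}
//	for k := range params { seen = append(seen, k) } // SA4010
//
// After: the dead bookkeeping is simply removed.
func pick(params map[string]string) string {
	var server string
	for k, v := range params {
		if k == "server" {
			server = v
		}
	}
	return server
}
```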


@@ -883,10 +883,6 @@ type rbdDiskMapper struct {
 	id            string
 	keyring       string
 	secret        string
-	adminSecret   string
-	adminID       string
-	imageFormat   string
-	imageFeatures []string
 }

 var _ volume.BlockVolumeUnmapper = &rbdDiskUnmapper{}


@@ -299,7 +299,6 @@ type storageos struct {
 	pvName       string
 	volName      string
 	volNamespace string
-	secretName   string
 	readOnly     bool
 	description  string
 	pool         string


@@ -373,8 +373,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	spec := volume.NewSpecFromPersistentVolume(pv, true)
 	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
 	fakeManager := &fakePDManager{}
-	fakeConfig := &fakeConfig{}
-	apiCfg := fakeConfig.GetAPIConfig()
+	apiCfg := GetAPIConfig()
 	mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), &testingexec.FakeExec{})
 	if err != nil {
 		t.Fatalf("error creating a new internal mounter:%v", err)


@@ -39,14 +39,7 @@ var testPool = "testpool"
 var testFSType = "ext2"
 var testVolUUID = "01c43d34-89f8-83d3-422b-43536a0f25e6"

-type fakeConfig struct {
-	apiAddr    string
-	apiUser    string
-	apiPass    string
-	apiVersion string
-}
-
-func (c fakeConfig) GetAPIConfig() *storageosAPIConfig {
+func GetAPIConfig() *storageosAPIConfig {
 	return &storageosAPIConfig{
 		apiAddr: "http://5.6.7.8:9999",
 		apiUser: "abc",
@@ -57,8 +50,7 @@ func (c fakeConfig) GetAPIConfig() *storageosAPIConfig {
 func TestClient(t *testing.T) {
 	util := storageosUtil{}
-	cfg := fakeConfig{}
-	err := util.NewAPI(cfg.GetAPIConfig())
+	err := util.NewAPI(GetAPIConfig())
 	if err != nil {
 		t.Fatalf("error getting api config: %v", err)
 	}


@@ -753,7 +753,7 @@ func checkVolumeContents(targetDir, tcName string, payload map[string]FileProjec
 	dataDirPath := filepath.Join(targetDir, dataDirName)
 	// use filepath.Walk to reconstruct the payload, then deep equal
 	observedPayload := make(map[string]FileProjection)
-	visitor := func(path string, info os.FileInfo, err error) error {
+	visitor := func(path string, info os.FileInfo, _ error) error {
 		if info.IsDir() {
 			return nil
 		}


@@ -33,11 +33,6 @@ const (
 	BadQuotaID QuotaID = 0
 )

-const (
-	acct      = iota
-	enforcing = iota
-)
-
 // QuotaType -- type of quota to be applied
 type QuotaType int


@@ -165,7 +165,7 @@ func readProjectFiles(projects *os.File, projid *os.File) projectsList {
 func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
 	unusedQuotasSearched := 0
-	for id := common.FirstQuota; id == id; id++ {
+	for id := common.FirstQuota; true; id++ {
 		if _, ok := idMap[id]; !ok {
 			isInUse, err := getApplier(path).QuotaIDIsInUse(id)
 			if err != nil {
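`id == id` is trivially true for integer IDs, and staticcheck's SA4000 flags identical expressions on both sides of an operator as a likely bug. The intent here was an unbounded counting loop, which the fix spells as a literal `true` condition (an empty condition, `for id := first; ; id++`, would work as well). A sketch:

```go
package demo

// firstFree mirrors the fixed loop: count upward from first until an
// unused ID turns up. Writing the condition as `id == id` trips SA4000.
func firstFree(first uint32, inUse map[uint32]bool) uint32 {
	for id := first; true; id++ {
		if !inUse[id] {
			return id
		}
	}
	// Unreachable, but required: with an explicit `true` condition the
	// compiler does not treat the loop as non-terminating.
	return 0
}
```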


@@ -303,6 +303,7 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
 // AssignQuota chooses the quota ID based on the pod UID and path.
 // If the pod UID is identical to another one known, it may (but presently
 // doesn't) choose the same quota ID as other volumes in the pod.
+//lint:ignore SA4009 poduid is overwritten by design, see comment below
 func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error {
 	if bytes == nil {
 		return fmt.Errorf("Attempting to assign null quota to %s", path)
@@ -316,7 +317,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 	// Current policy is to set individual quotas on each volumes.
 	// If we decide later that we want to assign one quota for all
 	// volumes in a pod, we can simply remove this line of code.
-	// If and when we decide permanently that we're going to adop
+	// If and when we decide permanently that we're going to adopt
 	// one quota per volume, we can rip all of the pod code out.
 	poduid = types.UID(uuid.NewUUID())
 	if pod, ok := dirPodMap[path]; ok && pod != poduid {
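Rather than changing behavior, this hunk suppresses a finding: `poduid` is a parameter the function deliberately overwrites before reading it, which staticcheck reports as SA4009 ("a function argument is overwritten before its first use"). staticcheck's `//lint:ignore <check> <reason>` directive, placed immediately before the flagged code, silences exactly the named check there and forces the author to record a reason. A hypothetical sketch:

```go
package demo

// newUID is a hypothetical stand-in for uuid.NewUUID above.
func newUID() string { return "fresh-uid" }

// The overwrite is intentional: each call gets a fresh UID regardless
// of what the caller passed in, so the SA4009 report is a false alarm.
//
//lint:ignore SA4009 uid is overwritten by design
func assign(uid string) string {
	uid = newUID()
	return uid
}
```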


@@ -555,7 +555,7 @@ func runCaseDisabled(t *testing.T, testcase quotaTestCase, seq int) bool {
 	var supports bool
 	switch testcase.op {
 	case "Supports":
-		if supports, err = fakeSupportsQuotas(testcase.path); supports {
+		if supports, _ = fakeSupportsQuotas(testcase.path); supports {
 			t.Errorf("Case %v (%s, %v) supports quotas but shouldn't", seq, testcase.path, false)
 			return true
 		}


@@ -229,7 +229,7 @@ func doCleanSubPaths(mounter mount.Interface, podDir string, volumeName string)
 			// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/*
 			fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name())
-			err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, err error) error {
+			err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, _ error) error {
 				if path == fullContainerDirPath {
 					// Skip top level directory
 					return nil


@@ -576,13 +576,13 @@ func TestCleanSubPaths(t *testing.T) {
 				return mounts, nil
 			},
 			unmount: func(mountpath string) error {
-				err := filepath.Walk(mountpath, func(path string, info os.FileInfo, err error) error {
+				err := filepath.Walk(mountpath, func(path string, info os.FileInfo, _ error) error {
 					if path == mountpath {
 						// Skip top level directory
 						return nil
 					}
-					if err = os.Remove(path); err != nil {
+					if err := os.Remove(path); err != nil {
 						return err
 					}
 					return filepath.SkipDir
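Both subpath hunks rewrite `filepath.Walk` callbacks whose incoming `err` parameter was shadowed or overwritten before its first use (again SA4009). Naming the parameter `_` documents that the walk error is intentionally ignored, and the `os.Remove` error gets its own scoped variable via `:=`. A runnable sketch of the fixed callback shape, mirroring the test helper above:

```go
package demo

import (
	"os"
	"path/filepath"
)

// removeTopLevel deletes each top-level entry under root.
func removeTopLevel(root string) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, _ error) error {
		if path == root {
			return nil // keep the root directory itself
		}
		// New scoped err via :=, instead of reusing the callback's
		// error parameter.
		if err := os.Remove(path); err != nil {
			return err
		}
		return filepath.SkipDir // don't descend into removed entries
	})
}
```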