Merge pull request #81892 from praseodym/fix-staticcheck-pkg/volume

Fix staticcheck failures for pkg/volume/...
Kubernetes Prow Robot 2020-04-09 20:03:48 -07:00 committed by GitHub
commit 3d350d86d6
28 changed files with 38 additions and 114 deletions
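
Most of the changes below follow a few recurring staticcheck fix patterns: checking error returns that were previously ignored, deleting unused struct fields, constants, and helper functions, and assigning intentionally discarded results to the blank identifier. A rough, self-contained sketch of the first pattern with made-up names (not an excerpt from the diff):

package main

import (
	"errors"
	"fmt"
)

type stats struct{ used int64 }

// fetchStats stands in for any call that returns a result and an error.
func fetchStats(path string) (*stats, error) {
	if path == "" {
		return nil, errors.New("empty path")
	}
	return &stats{used: 42}, nil
}

func main() {
	resp, err := fetchStats("/var/lib/data")
	if err != nil { // checking the error first avoids dereferencing a nil resp below
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("bytes used:", resp.used)
}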

View File

@ -15,19 +15,6 @@ pkg/registry/core/service/ipallocator
pkg/registry/core/service/portallocator
pkg/registry/core/service/storage
pkg/util/coverage
-pkg/volume
-pkg/volume/azure_dd
-pkg/volume/csi
-pkg/volume/flexvolume
-pkg/volume/iscsi
-pkg/volume/local
-pkg/volume/quobyte
-pkg/volume/rbd
-pkg/volume/storageos
-pkg/volume/util
-pkg/volume/util/fsquota
-pkg/volume/util/fsquota/common
-pkg/volume/util/subpath
test/e2e/apps
test/e2e/autoscaling
test/integration/examples

View File

@ -64,8 +64,7 @@ var (
string(api.AzureManagedDisk))
// only for Windows node
-winDiskNumRE     = regexp.MustCompile(`/dev/disk(.+)`)
-winDiskNumFormat = "/dev/disk%d"
+winDiskNumRE = regexp.MustCompile(`/dev/disk(.+)`)
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {

View File

@ -30,6 +30,8 @@ import (
"k8s.io/utils/mount"
)
+var winDiskNumFormat = "/dev/disk%d"
func scsiHostRescan(io ioHandler, exec utilexec.Interface) {
cmd := "Update-HostStorageCache"
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()

View File

@ -94,7 +94,6 @@ type csiBlockMapper struct {
readOnly bool
spec *volume.Spec
podUID types.UID
-volumeInfo map[string]string
}
var _ volume.BlockVolumeMapper = &csiBlockMapper{}

View File

@ -53,40 +53,6 @@ func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T
return csiMapper, spec, pv, nil
}
-func prepareBlockUnmapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) {
-registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
-pv := makeTestPV(specVolumeName, 10, testDriver, testVol)
-spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
-// save volume data
-dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host)
-if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
-t.Errorf("failed to create dir [%s]: %v", dir, err)
-}
-if err := saveVolumeData(
-dir,
-volDataFileName,
-map[string]string{
-volDataKey.specVolID: pv.ObjectMeta.Name,
-volDataKey.driverName: testDriver,
-volDataKey.volHandle: testVol,
-},
-); err != nil {
-t.Fatalf("failed to save volume data: %v", err)
-}
-unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID)
-if err != nil {
-t.Fatalf("failed to make a new Unmapper: %v", err)
-}
-csiUnmapper := unmapper.(*csiBlockMapper)
-csiUnmapper.csiClient = setupClient(t, true)
-return csiUnmapper, spec, pv, nil
-}
func TestBlockMapperGetGlobalMapPath(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()

View File

@ -23,7 +23,6 @@ import (
"io"
"net"
"sync"
"time"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc"
@ -104,12 +103,6 @@ type nodeV1ClientCreator func(addr csiAddr) (
err error,
)
-const (
-initialDuration = 1 * time.Second
-factor = 2.0
-steps = 5
-)
// newV1NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is not used anymore.
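
The comment above describes a client-plus-closer contract: the constructor returns both the NodeClient and an io.Closer for the underlying gRPC connection, and the caller must close it when the client is no longer needed. A minimal self-contained sketch of that shape (illustrative names only, not the package's real types):

package main

import (
	"fmt"
	"io"
)

// nopConn stands in for a gRPC client connection.
type nopConn struct{}

func (nopConn) Close() error { return nil }

type nodeClient struct{ conn io.Closer }

// newNodeClient mirrors the documented contract: client, closer, error.
func newNodeClient(addr string) (*nodeClient, io.Closer, error) {
	conn := nopConn{}
	return &nodeClient{conn: conn}, conn, nil
}

func main() {
	client, closer, err := newNodeClient("unix:///var/lib/kubelet/plugins/driver/csi.sock")
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer closer.Close() // close the underlying connection once the client is no longer used
	_ = client
	fmt.Println("node client ready")
}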

View File

@ -81,6 +81,9 @@ func (c *fakeCsiDriverClient) NodeGetVolumeStats(ctx context.Context, volID stri
VolumePath: targetPath,
}
resp, err := c.nodeClient.NodeGetVolumeStats(ctx, req)
+if err != nil {
+return nil, err
+}
usages := resp.GetUsage()
metrics := &volume.Metrics{}
if usages == nil {

View File

@ -73,7 +73,6 @@ type csiMountMgr struct {
spec *volume.Spec
pod *api.Pod
podUID types.UID
-options volume.VolumeOptions
publishContext map[string]string
kubeVolHost volume.KubeletVolumeHost
volume.MetricsProvider

View File

@ -456,8 +456,8 @@ func TestMounterSetupWithStatusTracking(t *testing.T) {
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
volume.VolumeOptions{},
)
-if mounter == nil {
-t.Fatal("failed to create CSI mounter")
+if err != nil {
+t.Fatalf("failed to create CSI mounter: %v", err)
}
csiMounter := mounter.(*csiMountMgr)

View File

@ -623,7 +623,7 @@ func TestPluginNewMounter(t *testing.T) {
}
csiClient, err := csiMounter.csiClientGetter.Get()
if csiClient == nil {
t.Error("mounter csiClient is nil")
t.Errorf("mounter csiClient is nil: %v", err)
}
if err != nil {
t.Fatal(err)
@ -765,7 +765,7 @@ func TestPluginNewMounterWithInline(t *testing.T) {
}
csiClient, err := csiMounter.csiClientGetter.Get()
if csiClient == nil {
t.Error("mounter csiClient is nil")
t.Errorf("mounter csiClient is nil: %v", err)
}
if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
@ -860,7 +860,7 @@ func TestPluginNewUnmounter(t *testing.T) {
csiClient, err := csiUnmounter.csiClientGetter.Get()
if csiClient == nil {
t.Error("mounter csiClient is nil")
t.Errorf("mounter csiClient is nil: %v", err)
}
}
@ -1185,7 +1185,7 @@ func TestPluginNewBlockMapper(t *testing.T) {
}
csiClient, err := csiMapper.csiClientGetter.Get()
if csiClient == nil {
t.Error("mapper csiClient is nil")
t.Errorf("mapper csiClient is nil: %v", err)
}
// ensure data file is created
@ -1248,7 +1248,7 @@ func TestPluginNewUnmapper(t *testing.T) {
csiClient, err := csiUnmapper.csiClientGetter.Get()
if csiClient == nil {
t.Error("unmapper csiClient is nil")
t.Errorf("unmapper csiClient is nil: %v", err)
}
// test loaded vol data

View File

@ -305,7 +305,8 @@ func TestCSI_VolumeAll(t *testing.T) {
go func(spec *volume.Spec, nodeName types.NodeName) {
attachID, err := volAttacher.Attach(spec, nodeName)
if err != nil {
t.Fatalf("csiTest.VolumeAll attacher.Attach failed: %s", err)
t.Errorf("csiTest.VolumeAll attacher.Attach failed: %s", err)
return
}
t.Logf("csiTest.VolumeAll got attachID %s", attachID)

View File

@ -65,6 +65,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
_, fs, watcher, prober := initTestEnvironment(t)
prober.Probe()
events, err := prober.Probe()
+assert.NoError(t, err)
assert.Equal(t, 0, len(events))
// Call probe after a file is added. Should return 1 event.

View File

@ -91,9 +91,7 @@ func TestGetAccessModes(t *testing.T) {
}
type fakeDiskManager struct {
-tmpDir       string
-attachCalled bool
-detachCalled bool
+tmpDir string
}
func NewFakeDiskManager() *fakeDiskManager {
@ -498,11 +496,11 @@ func TestGetISCSICHAP(t *testing.T) {
},
}
for _, testcase := range tests {
-resultDiscoveryCHAP, err := getISCSIDiscoveryCHAPInfo(testcase.spec)
+resultDiscoveryCHAP, _ := getISCSIDiscoveryCHAPInfo(testcase.spec)
resultSessionCHAP, err := getISCSISessionCHAPInfo(testcase.spec)
switch testcase.name {
case "no volume":
-if err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
+if err == nil || err.Error() != testcase.expectedError.Error() || resultDiscoveryCHAP != testcase.expectedDiscoveryCHAP || resultSessionCHAP != testcase.expectedSessionCHAP {
t.Errorf("%s failed: expected err=%v DiscoveryCHAP=%v SessionCHAP=%v, got %v/%v/%v",
testcase.name, testcase.expectedError, testcase.expectedDiscoveryCHAP, testcase.expectedSessionCHAP,
err, resultDiscoveryCHAP, resultSessionCHAP)

View File

@ -156,7 +156,7 @@ func TestWaitForPathToExist(t *testing.T) {
t.Errorf("waitForPathToExist: wrong code path called for %s", devicePath[1])
}
-exist = waitForPathToExistInternal(&devicePath[1], 1, "fake_iface", os.Stat, fakeFilepathGlob2)
+_ = waitForPathToExistInternal(&devicePath[1], 1, "fake_iface", os.Stat, fakeFilepathGlob2)
if devicePath[1] != fpath {
t.Errorf("waitForPathToExist: wrong code path called for %s", devicePath[1])
}

View File

@ -360,8 +360,7 @@ func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, devic
switch fileType {
case hostutil.FileTypeBlockDev:
// local volume plugin does not implement AttachableVolumePlugin interface, so set devicePath to Path in PV spec directly
-devicePath = spec.PersistentVolume.Spec.Local.Path
-return dm.mountLocalBlockDevice(spec, devicePath, deviceMountPath)
+return dm.mountLocalBlockDevice(spec, spec.PersistentVolume.Spec.Local.Path, deviceMountPath)
case hostutil.FileTypeDirectory:
// if the given local volume path is already a filesystem directory, return directly
return nil

View File

@ -116,13 +116,13 @@ func TestVolumePluginMgrFunc(t *testing.T) {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
-plug, err = vpm.FindPluginBySpec(nil)
+_, err = vpm.FindPluginBySpec(nil)
if err == nil {
t.Errorf("Should return error if volume spec is nil")
}
volumeSpec := &Spec{}
-plug, err = vpm.FindPluginBySpec(volumeSpec)
+_, err = vpm.FindPluginBySpec(volumeSpec)
if err != nil {
t.Errorf("Should return test plugin if volume spec is not nil")
}

View File

@ -465,19 +465,14 @@ func parseAPIConfig(plugin *quobytePlugin, params map[string]string) (*quobyteAP
var apiServer, secretName string
secretNamespace := "default"
-deleteKeys := []string{}
for k, v := range params {
switch gostrings.ToLower(k) {
case "adminsecretname":
secretName = v
-deleteKeys = append(deleteKeys, k)
case "adminsecretnamespace":
secretNamespace = v
-deleteKeys = append(deleteKeys, k)
case "quobyteapiserver":
apiServer = v
-deleteKeys = append(deleteKeys, k)
}
}

View File

@ -879,14 +879,10 @@ var _ volume.BlockVolumeMapper = &rbdDiskMapper{}
type rbdDiskMapper struct {
*rbd
-mon           []string
-id            string
-keyring       string
-secret        string
-adminSecret   string
-adminID       string
-imageFormat   string
-imageFeatures []string
+mon     []string
+id      string
+keyring string
+secret  string
}
var _ volume.BlockVolumeUnmapper = &rbdDiskUnmapper{}

View File

@ -299,7 +299,6 @@ type storageos struct {
pvName string
volName string
volNamespace string
-secretName string
readOnly bool
description string
pool string

View File

@ -373,8 +373,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
fakeManager := &fakePDManager{}
fakeConfig := &fakeConfig{}
apiCfg := fakeConfig.GetAPIConfig()
apiCfg := GetAPIConfig()
mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), &testingexec.FakeExec{})
if err != nil {
t.Fatalf("error creating a new internal mounter:%v", err)

View File

@ -39,14 +39,7 @@ var testPool = "testpool"
var testFSType = "ext2"
var testVolUUID = "01c43d34-89f8-83d3-422b-43536a0f25e6"
-type fakeConfig struct {
-apiAddr string
-apiUser string
-apiPass string
-apiVersion string
-}
-func (c fakeConfig) GetAPIConfig() *storageosAPIConfig {
+func GetAPIConfig() *storageosAPIConfig {
return &storageosAPIConfig{
apiAddr: "http://5.6.7.8:9999",
apiUser: "abc",
@ -57,8 +50,7 @@ func (c fakeConfig) GetAPIConfig() *storageosAPIConfig {
func TestClient(t *testing.T) {
util := storageosUtil{}
-cfg := fakeConfig{}
-err := util.NewAPI(cfg.GetAPIConfig())
+err := util.NewAPI(GetAPIConfig())
if err != nil {
t.Fatalf("error getting api config: %v", err)
}

View File

@ -753,7 +753,7 @@ func checkVolumeContents(targetDir, tcName string, payload map[string]FileProjec
dataDirPath := filepath.Join(targetDir, dataDirName)
// use filepath.Walk to reconstruct the payload, then deep equal
observedPayload := make(map[string]FileProjection)
-visitor := func(path string, info os.FileInfo, err error) error {
+visitor := func(path string, info os.FileInfo, _ error) error {
if info.IsDir() {
return nil
}

View File

@ -33,11 +33,6 @@ const (
BadQuotaID QuotaID = 0
)
-const (
-acct = iota
-enforcing = iota
-)
// QuotaType -- type of quota to be applied
type QuotaType int

View File

@ -165,7 +165,7 @@ func readProjectFiles(projects *os.File, projid *os.File) projectsList {
func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
unusedQuotasSearched := 0
-for id := common.FirstQuota; id == id; id++ {
+for id := common.FirstQuota; true; id++ {
if _, ok := idMap[id]; !ok {
isInUse, err := getApplier(path).QuotaIDIsInUse(id)
if err != nil {

View File

@ -303,6 +303,7 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
+//lint:ignore SA4009 poduid is overwritten by design, see comment below
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error {
if bytes == nil {
return fmt.Errorf("Attempting to assign null quota to %s", path)
@ -316,7 +317,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
// Current policy is to set individual quotas on each volumes.
// If we decide later that we want to assign one quota for all
// volumes in a pod, we can simply remove this line of code.
-// If and when we decide permanently that we're going to adop
+// If and when we decide permanently that we're going to adopt
// one quota per volume, we can rip all of the pod code out.
poduid = types.UID(uuid.NewUUID())
if pod, ok := dirPodMap[path]; ok && pod != poduid {
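
The //lint:ignore line added above is staticcheck's per-line suppression directive: it names the check to silence (SA4009 reports a function argument that is overwritten before its first use) plus a required free-form reason, and it applies to the statement or declaration that immediately follows it. A hypothetical standalone example of the same directive:

package main

import "fmt"

func newID() string { return "generated-0001" }

//lint:ignore SA4009 the incoming id is deliberately replaced with a generated value
func register(id string) {
	id = newID() // overwrites the argument before it is read, which SA4009 would otherwise report
	fmt.Println("registered", id)
}

func main() {
	register("caller-supplied-id")
}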

View File

@ -555,7 +555,7 @@ func runCaseDisabled(t *testing.T, testcase quotaTestCase, seq int) bool {
var supports bool
switch testcase.op {
case "Supports":
-if supports, err = fakeSupportsQuotas(testcase.path); supports {
+if supports, _ = fakeSupportsQuotas(testcase.path); supports {
t.Errorf("Case %v (%s, %v) supports quotas but shouldn't", seq, testcase.path, false)
return true
}

View File

@ -229,7 +229,7 @@ func doCleanSubPaths(mounter mount.Interface, podDir string, volumeName string)
// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/*
fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name())
-err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, err error) error {
+err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, _ error) error {
if path == fullContainerDirPath {
// Skip top level directory
return nil

View File

@ -576,13 +576,13 @@ func TestCleanSubPaths(t *testing.T) {
return mounts, nil
},
unmount: func(mountpath string) error {
-err := filepath.Walk(mountpath, func(path string, info os.FileInfo, err error) error {
+err := filepath.Walk(mountpath, func(path string, info os.FileInfo, _ error) error {
if path == mountpath {
// Skip top level directory
return nil
}
-if err = os.Remove(path); err != nil {
+if err := os.Remove(path); err != nil {
return err
}
return filepath.SkipDir