Fix static-check failures for importing the same package multiple times

Signed-off-by: prateekpandey14 <prateekpandey14@gmail.com>
This commit is contained in:
prateekpandey14 2021-05-09 23:35:06 +05:30
parent d38105be86
commit f9cf14f3f6
7 changed files with 32 additions and 40 deletions

View File

@ -32,7 +32,6 @@ import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@ -106,7 +105,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
}
attachment := &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
@ -162,7 +161,7 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim
func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) {
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
if err != nil {
klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err)
@ -243,7 +242,7 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
// The cache lookup is not setup or the object is not found in the cache.
// Get the object from the API server.
klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err = c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
attach, err = c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
if err != nil {
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
@ -524,7 +523,7 @@ func (c *csiAttacher) waitForVolumeAttachDetachStatus(attach *storage.VolumeAtta
return nil
}
watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(context.TODO(), meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion}))
if err != nil {
return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle)
}
@ -671,7 +670,7 @@ func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMoun
pvName := filepath.Base(dir)
// Get PV and check for errors
pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, meta.GetOptions{})
pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
return "", "", err
}

View File

@ -31,7 +31,6 @@ import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -59,7 +58,7 @@ var (
func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttachment {
return &storage.VolumeAttachment{
ObjectMeta: meta.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
@ -84,7 +83,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R
defer ticker.Stop()
// wait for attachment to be saved
for i := 0; i < 100; i++ {
attach, err = client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
attach, err = client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
<-ticker.C
@ -551,7 +550,7 @@ func TestAttacherWaitForAttach(t *testing.T) {
if err != nil {
t.Fatalf("failed to create VolumeAttachment: %v", err)
}
gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, meta.GetOptions{})
gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get created VolumeAttachment: %v", err)
}
@ -635,7 +634,7 @@ func TestAttacherWaitForAttachWithInline(t *testing.T) {
if err != nil {
t.Fatalf("failed to create VolumeAttachment: %v", err)
}
gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, meta.GetOptions{})
gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get created VolumeAttachment: %v", err)
}
@ -996,7 +995,7 @@ func TestAttacherDetach(t *testing.T) {
if !tc.shouldFail && err != nil {
t.Fatalf("unexpected err: %v", err)
}
attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), tc.attachID, meta.GetOptions{})
attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), tc.attachID, metav1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
t.Fatalf("unexpected err: %v", err)

View File

@ -29,7 +29,6 @@ import (
api "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/volume"
@ -41,7 +40,7 @@ func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mapper, err := plug.NewBlockVolumeMapper(
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod}},
&api.Pod{ObjectMeta: metav1.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod}},
volume.VolumeOptions{},
)
if err != nil {
@ -333,7 +332,7 @@ func TestBlockMapperMapPodDeviceNotSupportAttach(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset()
attachRequired := false
fakeDriver := &storagev1.CSIDriver{
ObjectMeta: meta.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: testDriver,
},
Spec: storagev1.CSIDriverSpec{
@ -373,7 +372,7 @@ func TestBlockMapperMapPodDeviceWithPodInfo(t *testing.T) {
attachRequired := false
podInfo := true
fakeDriver := &storagev1.CSIDriver{
ObjectMeta: meta.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: testDriver,
},
Spec: storagev1.CSIDriverSpec{

View File

@ -25,7 +25,6 @@ import (
"time"
api "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -65,11 +64,11 @@ func newTestPluginWithVolumeHost(t *testing.T, client *fakeclient.Clientset, hos
client = fakeclient.NewSimpleClientset()
}
client.Tracker().Add(&v1.Node{
client.Tracker().Add(&api.Node{
ObjectMeta: meta.ObjectMeta{
Name: "fakeNode",
},
Spec: v1.NodeSpec{},
Spec: api.NodeSpec{},
})
// Start informer for CSIDrivers.
@ -1094,11 +1093,11 @@ func TestPluginFindAttachablePlugin(t *testing.T) {
client := fakeclient.NewSimpleClientset(
getTestCSIDriver(test.driverName, nil, &test.canAttach, nil),
&v1.Node{
&api.Node{
ObjectMeta: meta.ObjectMeta{
Name: "fakeNode",
},
Spec: v1.NodeSpec{},
Spec: api.NodeSpec{},
},
)
factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
@ -1221,11 +1220,11 @@ func TestPluginFindDeviceMountablePluginBySpec(t *testing.T) {
defer os.RemoveAll(tmpDir)
client := fakeclient.NewSimpleClientset(
&v1.Node{
&api.Node{
ObjectMeta: meta.ObjectMeta{
Name: "fakeNode",
},
Spec: v1.NodeSpec{},
Spec: api.NodeSpec{},
},
)
host := volumetest.NewFakeVolumeHostWithCSINodeName(t, tmpDir, client, ProbeVolumePlugins(), "fakeNode", nil, nil)

View File

@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/apis/core"
api "k8s.io/kubernetes/pkg/apis/core"
)
func makeSvc(externalIPs ...string) *core.Service {
@ -98,7 +97,7 @@ func TestAdmission(t *testing.T) {
attrs := admission.NewAttributesRecord(
tc.newSvc, // new object
tc.oldSvc, // old object
api.Kind("Service").WithVersion("version"),
core.Kind("Service").WithVersion("version"),
tc.newSvc.Namespace,
tc.newSvc.Name,
corev1.Resource("services").WithVersion("version"),

View File

@ -32,7 +32,6 @@ import (
schedulingv1listers "k8s.io/client-go/listers/scheduling/v1"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/pkg/apis/core"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
)
@ -99,7 +98,7 @@ func (p *Plugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactor
}
var (
podResource = api.Resource("pods")
podResource = core.Resource("pods")
priorityClassResource = scheduling.Resource("priorityclasses")
)
@ -146,13 +145,13 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi
// admitPod makes sure a new pod does not set spec.Priority field. It also makes sure that the PriorityClassName exists if it is provided and resolves the pod priority from the PriorityClassName.
func (p *Plugin) admitPod(a admission.Attributes) error {
operation := a.GetOperation()
pod, ok := a.GetObject().(*api.Pod)
pod, ok := a.GetObject().(*core.Pod)
if !ok {
return errors.NewBadRequest("resource was marked with kind Pod but was unable to be converted")
}
if operation == admission.Update {
oldPod, ok := a.GetOldObject().(*api.Pod)
oldPod, ok := a.GetOldObject().(*core.Pod)
if !ok {
return errors.NewBadRequest("resource was marked with kind Pod but was unable to be converted")
}

View File

@ -22,7 +22,6 @@ import (
"testing"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
nodev1 "k8s.io/api/node/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -34,7 +33,6 @@ import (
"k8s.io/client-go/kubernetes/fake"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/pkg/apis/core"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/features"
@ -257,18 +255,18 @@ func TestSetScheduling(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Handler: "bar",
Scheduling: &nodev1.Scheduling{
Tolerations: []v1.Toleration{
Tolerations: []corev1.Toleration{
{
Key: "foo",
Operator: v1.TolerationOpEqual,
Operator: corev1.TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "fizz",
Operator: v1.TolerationOpEqual,
Operator: corev1.TolerationOpEqual,
Value: "buzz",
Effect: v1.TaintEffectNoSchedule,
Effect: corev1.TaintEffectNoSchedule,
},
},
},
@ -407,19 +405,19 @@ func TestAdmit(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: runtimeClassName},
}
pod := api.Pod{
pod := core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "podname"},
Spec: api.PodSpec{
Spec: core.PodSpec{
RuntimeClassName: &runtimeClassName,
},
}
attributes := admission.NewAttributesRecord(&pod,
nil,
api.Kind("kind").WithVersion("version"),
core.Kind("kind").WithVersion("version"),
"",
"",
api.Resource("pods").WithVersion("version"),
core.Resource("pods").WithVersion("version"),
"",
admission.Create,
nil,