Merge pull request #61212 from charrywanganthony/duplicated_import

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

remove duplicated import

**Release note**:

```release-note
NONE
```
This commit is contained in:
Kubernetes Submit Queue 2018-08-14 20:18:00 -07:00 committed by GitHub
commit 1f86c1cf26
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 11 additions and 17 deletions

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/diff"
apiserveroptions "k8s.io/apiserver/pkg/server/options" apiserveroptions "k8s.io/apiserver/pkg/server/options"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/storagebackend"
utilflag "k8s.io/apiserver/pkg/util/flag" utilflag "k8s.io/apiserver/pkg/util/flag"
auditbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered" auditbuffered "k8s.io/apiserver/plugin/pkg/audit/buffered"
@@ -160,7 +159,7 @@ func TestAddFlags(t *testing.T) {
EnableWatchCache: true, EnableWatchCache: true,
DefaultWatchCacheSize: 100, DefaultWatchCacheSize: 100,
}, },
SecureServing: genericoptions.WithLoopback(&apiserveroptions.SecureServingOptions{ SecureServing: apiserveroptions.WithLoopback(&apiserveroptions.SecureServingOptions{
BindAddress: net.ParseIP("192.168.10.20"), BindAddress: net.ParseIP("192.168.10.20"),
BindPort: 6443, BindPort: 6443,
ServerCert: apiserveroptions.GeneratableKeyCert{ ServerCert: apiserveroptions.GeneratableKeyCert{

View File

@@ -22,7 +22,6 @@ import (
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/apis/storage"
storageapi "k8s.io/kubernetes/pkg/apis/storage"
) )
// Funcs returns the fuzzer functions for the storage api group. // Funcs returns the fuzzer functions for the storage api group.
@@ -32,7 +31,7 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
c.FuzzNoCustom(obj) // fuzz self without calling this function again c.FuzzNoCustom(obj) // fuzz self without calling this function again
reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimDelete, api.PersistentVolumeReclaimRetain} reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimDelete, api.PersistentVolumeReclaimRetain}
obj.ReclaimPolicy = &reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))] obj.ReclaimPolicy = &reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
bindingModes := []storageapi.VolumeBindingMode{storageapi.VolumeBindingImmediate, storageapi.VolumeBindingWaitForFirstConsumer} bindingModes := []storage.VolumeBindingMode{storage.VolumeBindingImmediate, storage.VolumeBindingWaitForFirstConsumer}
obj.VolumeBindingMode = &bindingModes[c.Rand.Intn(len(bindingModes))] obj.VolumeBindingMode = &bindingModes[c.Rand.Intn(len(bindingModes))]
}, },
} }

View File

@@ -29,12 +29,11 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/securitycontext"
) )
func noDefault(*api.Pod) error { return nil } func noDefault(*core.Pod) error { return nil }
func TestDecodeSinglePod(t *testing.T) { func TestDecodeSinglePod(t *testing.T) {
grace := int64(30) grace := int64(30)
@@ -60,7 +59,7 @@ func TestDecodeSinglePod(t *testing.T) {
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}}, }},
SecurityContext: &v1.PodSecurityContext{}, SecurityContext: &v1.PodSecurityContext{},
SchedulerName: api.DefaultSchedulerName, SchedulerName: core.DefaultSchedulerName,
}, },
} }
json, err := runtime.Encode(testapi.Default.Codec(), pod) json, err := runtime.Encode(testapi.Default.Codec(), pod)
@@ -123,7 +122,7 @@ func TestDecodePodList(t *testing.T) {
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}}, }},
SecurityContext: &v1.PodSecurityContext{}, SecurityContext: &v1.PodSecurityContext{},
SchedulerName: api.DefaultSchedulerName, SchedulerName: core.DefaultSchedulerName,
}, },
} }
podList := &v1.PodList{ podList := &v1.PodList{

View File

@@ -24,8 +24,7 @@ import (
apps "k8s.io/api/apps/v1" apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@@ -520,7 +519,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels) newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err return true, err
} }
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict { if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict") framework.Logf("failed to update node due to resource version conflict")
return false, nil return false, nil
} }
@@ -734,7 +733,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) { func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil return true, nil
} }
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err) return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)

View File

@@ -32,7 +32,6 @@ import (
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types" vim25types "github.com/vmware/govmomi/vim25/types"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
@@ -618,7 +617,7 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
_, err := vm.PowerOff(ctx) _, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff) err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node") Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
} }
@@ -630,7 +629,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
framework.Logf("Powering on node VM %s", nodeName) framework.Logf("Powering on node VM %s", nodeName)
vm.PowerOn(ctx) vm.PowerOn(ctx)
err := vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node") Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
} }

View File

@@ -36,7 +36,6 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
externalclientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core"
serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount" serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -536,7 +535,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}) })
} }
func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) { func doTokenReview(t *testing.T, cs clientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) {
t.Helper() t.Helper()
trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{ trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{
Spec: authenticationv1.TokenReviewSpec{ Spec: authenticationv1.TokenReviewSpec{