mirror of https://github.com/k3s-io/kubernetes.git
	update clientset.Core() to clientset.CoreV1() in test
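The change below is mechanical: every call site swaps the deprecated clientset.Core() accessor for the group-version-explicit clientset.CoreV1(). Both return the same core/v1 typed client, so each hunk is a one-token rename. A minimal sketch of the pattern, assuming a client-go vintage contemporary with this commit (typed methods did not yet take a context.Context, and kubernetes.Interface stands in here for the generated in-tree clientset.Interface):

	package example

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// listPods demonstrates the rename applied throughout the diff.
	func listPods(cs kubernetes.Interface, ns string) error {
		// Before: the deprecated, version-ambiguous accessor.
		//   _, err := cs.Core().Pods(ns).List(metav1.ListOptions{})
		// After: the accessor that names the API group-version explicitly.
		_, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
		return err
	}

Either form talks to the same /api/v1 endpoints; naming the version at the call site is what allowed later client-go releases to drop the unversioned aliases.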
@@ -888,7 +888,7 @@ func (i *podInformer) Lister() corelisters.PodLister {
 // NewPodInformer creates a shared index informer that returns only non-terminal pods.
 func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) coreinformers.PodInformer {
 	selector := fields.ParseSelectorOrDie("status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
-	lw := cache.NewListWatchFromClient(client.Core().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector)
+	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector)
 	return &podInformer{
 		informer: cache.NewSharedIndexInformer(lw, &v1.Pod{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
 	}
@@ -923,7 +923,7 @@ func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
 		// Get the pod again; it may have changed/been scheduled already.
 		getBackoff := initialGetBackoff
 		for {
-			pod, err := factory.client.Core().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
+			pod, err := factory.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{})
 			if err == nil {
 				if len(pod.Spec.NodeName) == 0 {
 					podQueue.AddIfNotPresent(pod)
@@ -979,7 +979,7 @@ type podConditionUpdater struct
 func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error {
 	glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
 	if podutil.UpdatePodCondition(&pod.Status, condition) {
-		_, err := p.Client.Core().Pods(pod.Namespace).UpdateStatus(pod)
+		_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
 		return err
 	}
 	return nil

@@ -163,7 +163,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume
 			},
 		},
 	}
-	endpoints, err := cs.Core().Endpoints(namespace).Create(endpoints)
+	endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
 	Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")

 	return config, pod, ip

@@ -78,7 +78,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 			svc.Spec.Type = v1.ServiceTypeNodePort
 			svc.Spec.LoadBalancerSourceRanges = nil
 		})
-		Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+		Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		By("Waiting for the local traffic health check firewall rule to be deleted")
 		localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), false)
 		_, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 		jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true)
 		defer func() {
 			framework.Logf("Cleaning up the netexec pod: %v", podName)
-			Expect(cs.Core().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
 		}()
 	}

@@ -135,8 +135,8 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
 	f := framework.NewDefaultFramework("no-snat-test")
 	It("Should be able to send traffic between Pods without SNAT", func() {
 		cs := f.ClientSet
-		pc := cs.Core().Pods(f.Namespace.Name)
-		nc := cs.Core().Nodes()
+		pc := cs.CoreV1().Pods(f.Namespace.Name)
+		nc := cs.CoreV1().Nodes()

 		By("creating a test pod on each Node")
 		nodes, err := nc.List(metav1.ListOptions{})

@@ -65,7 +65,7 @@ var _ = SIGDescribe("Services", func() {
 	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.

 	It("should provide secure master service [Conformance]", func() {
-		_, err := cs.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
+		_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 	})

@@ -80,7 +80,7 @@ var _ = SIGDescribe("Services", func() {

 		By("creating service " + serviceName + " in namespace " + ns)
 		defer func() {
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()

@@ -96,7 +96,7 @@ var _ = SIGDescribe("Services", func() {
 				}},
 			},
 		}
-		_, err := cs.Core().Services(ns).Create(service)
+		_, err := cs.CoreV1().Services(ns).Create(service)
 		Expect(err).NotTo(HaveOccurred())

 		framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
@@ -104,7 +104,7 @@ var _ = SIGDescribe("Services", func() {
 		names := map[string]bool{}
 		defer func() {
 			for name := range names {
-				err := cs.Core().Pods(ns).Delete(name, nil)
+				err := cs.CoreV1().Pods(ns).Delete(name, nil)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		}()
@@ -136,7 +136,7 @@ var _ = SIGDescribe("Services", func() {
 		ns := f.Namespace.Name

 		defer func() {
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()

@@ -166,7 +166,7 @@ var _ = SIGDescribe("Services", func() {
 				},
 			},
 		}
-		_, err := cs.Core().Services(ns).Create(service)
+		_, err := cs.CoreV1().Services(ns).Create(service)
 		Expect(err).NotTo(HaveOccurred())
 		port1 := 100
 		port2 := 101
@@ -175,7 +175,7 @@ var _ = SIGDescribe("Services", func() {
 		names := map[string]bool{}
 		defer func() {
 			for name := range names {
-				err := cs.Core().Pods(ns).Delete(name, nil)
+				err := cs.CoreV1().Pods(ns).Delete(name, nil)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		}()
@@ -235,7 +235,7 @@ var _ = SIGDescribe("Services", func() {
 		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
 		defer func() {
 			framework.Logf("Cleaning up the sourceip test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		serviceIp := tcpService.Spec.ClusterIP
@@ -256,7 +256,7 @@ var _ = SIGDescribe("Services", func() {
 		jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName)
 		defer func() {
 			framework.Logf("Cleaning up the echo server pod")
-			err := cs.Core().Pods(ns).Delete(serverPodName, nil)
+			err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()

@@ -797,7 +797,7 @@ var _ = SIGDescribe("Services", func() {
 		externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
 		defer func() {
 			framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)
@@ -821,7 +821,7 @@ var _ = SIGDescribe("Services", func() {
 		externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
 		defer func() {
 			framework.Logf("Cleaning up the ExternalName to NodePort test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)
@@ -845,7 +845,7 @@ var _ = SIGDescribe("Services", func() {
 		clusterIPService := jig.CreateTCPServiceOrFail(ns, nil)
 		defer func() {
 			framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP)
@@ -869,7 +869,7 @@ var _ = SIGDescribe("Services", func() {
 		})
 		defer func() {
 			framework.Logf("Cleaning up the NodePort to ExternalName test service")
-			err := cs.Core().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
@@ -1283,9 +1283,9 @@ var _ = SIGDescribe("Services", func() {
 		acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil)
 		dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil)

-		acceptPod, err := cs.Core().Pods(namespace).Get(acceptPodName, metav1.GetOptions{})
+		acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
-		dropPod, err := cs.Core().Pods(namespace).Get(dropPodName, metav1.GetOptions{})
+		dropPod, err := cs.CoreV1().Pods(namespace).Get(dropPodName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())

 		By("creating a pod to be part of the service " + serviceName)
@@ -1304,7 +1304,7 @@ var _ = SIGDescribe("Services", func() {
 			svc.Spec.Type = v1.ServiceTypeNodePort
 			svc.Spec.LoadBalancerSourceRanges = nil
 		})
-		Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+		Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout)
@@ -1465,7 +1465,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 			for _, ips := range jig.GetEndpointNodes(svc) {
 				Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(HaveOccurred())
 			}
-			Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		svcTCPPort := int(svc.Spec.Ports[0].Port)
@@ -1489,7 +1489,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {

 		svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true)
 		defer func() {
-			Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		tcpNodePort := int(svc.Spec.Ports[0].NodePort)
@@ -1527,7 +1527,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 		serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
 		defer func() {
 			jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-			Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
@@ -1580,7 +1580,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 		serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
 		defer func() {
 			jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-			Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
@@ -1593,7 +1593,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 			pod.Spec.NodeName = nodeName
 		})
 		defer func() {
-			err := cs.Core().Pods(namespace).Delete(execPodName, nil)
+			err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil)
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName, metav1.GetOptions{})
@@ -1631,7 +1631,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 		serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
 		defer func() {
 			jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
-			Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
+			Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
 		}()

 		// save the health check node port because it disappears when ESIPP is turned off.

@@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		// RC should be running successfully
 		// TODO: WaitForSchedulerAfterAction() can on be used to wait for failure event,
 		// not for successful RC, since no specific pod name can be provided.
-		_, err := cs.Core().ReplicationControllers(ns).Create(rc)
+		_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))

@@ -167,7 +167,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		By("Launching two pods on two distinct nodes to get two node names")
 		CreateHostPortPods(f, "host-port", 2, true)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
-		podList, err := cs.Core().Pods(ns).List(metav1.ListOptions{})
+		podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
 		Expect(len(podList.Items)).To(Equal(2))
 		nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
@@ -220,7 +220,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, labelRCName)

 		WaitForSchedulerAfterAction(f, func() error {
-			_, err := cs.Core().ReplicationControllers(ns).Create(rc)
+			_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
 			return err
 		}, labelRCName, false)

@@ -87,7 +87,7 @@ func makeCudaAdditionTestPod() *v1.Pod {
 }

 func isClusterRunningCOS(f *framework.Framework) bool {
-	nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "getting node list")
 	for _, node := range nodeList.Items {
 		if !strings.Contains(node.Status.NodeInfo.OSImage, cosOSImage) {
@@ -99,7 +99,7 @@ func isClusterRunningCOS(f *framework.Framework) bool {

 func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
 	framework.Logf("Getting list of Nodes from API server")
-	nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "getting node list")
 	for _, node := range nodeList.Items {
 		if node.Spec.Unschedulable {
@@ -115,7 +115,7 @@ func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
 }

 func getGPUsAvailable(f *framework.Framework) int64 {
-	nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 	framework.ExpectNoError(err, "getting node list")
 	var gpusAvailable int64
 	for _, node := range nodeList.Items {

@@ -41,7 +41,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {
 	BeforeEach(func() {
 		if node == nil {
 			// Priming invocation; select the first non-master node.
-			nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+			nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 			Expect(err).NotTo(HaveOccurred())
 			for _, n := range nodes.Items {
 				if !system.IsMasterNode(n.Name) {
@@ -74,7 +74,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the pod was scheduled")
 		action := func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 			return err
 		}
 		// Here we don't check for the bound node name since it can land on
@@ -101,7 +101,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the pod was scheduled")
 		action := func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 			return err
 		}
 		predicate := scheduleSuccessEvent(pod.Name, node.Name)
@@ -119,7 +119,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the pod was not scheduled")
 		action := func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(f.NewTestPod("over-max-oir", requests, limits))
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(f.NewTestPod("over-max-oir", requests, limits))
 			return err
 		}
 		predicate := scheduleFailureEvent("over-max-oir")
@@ -164,7 +164,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the pod was scheduled")
 		action := func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 			return err
 		}
 		predicate := scheduleSuccessEvent(pod.Name, node.Name)
@@ -204,7 +204,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the pod was not scheduled")
 		action = func() error {
-			_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
+			_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 			return err
 		}
 		predicate = scheduleFailureEvent(pod.Name)
@@ -230,7 +230,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates one pod was scheduled")
 		action := func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod1)
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod1)
 			return err
 		}
 		predicate := scheduleSuccessEvent(pod1.Name, node.Name)
@@ -240,7 +240,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates a subsequent pod was not scheduled")
 		action = func() error {
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod2)
+			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
 			return err
 		}
 		predicate = scheduleFailureEvent(pod2.Name)
@@ -250,7 +250,7 @@ var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {

 		By("Observing an event that indicates the second pod was scheduled after deleting the first pod")
 		action = func() error {
-			err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod1.Name, nil)
+			err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod1.Name, nil)
 			return err
 		}
 		predicate = scheduleSuccessEvent(pod2.Name, node.Name)
@@ -265,7 +265,7 @@ func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1
 	action := func() error {
 		By(fmt.Sprintf("Adding OIR to node [%s]", nodeName))
 		patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName)))
-		return f.ClientSet.Core().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
+		return f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
 	}
 	predicate := func(n *v1.Node) bool {
 		capacity, foundCap := n.Status.Capacity[opaqueResName]
@@ -284,7 +284,7 @@ func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName
 	action := func() error {
 		By(fmt.Sprintf("Removing OIR from node [%s]", nodeName))
 		patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName)))
-		f.ClientSet.Core().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
+		f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
 		return nil // Ignore error -- the opaque resource may not exist.
 	}
 	predicate := func(n *v1.Node) bool {

@@ -66,7 +66,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	ignoreLabels := framework.ImagePullerLabels

 	AfterEach(func() {
-		rc, err := cs.Core().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
+		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
 		if err == nil && *(rc.Spec.Replicas) != 0 {
 			By("Cleaning up the replication controller")
 			err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
@@ -166,7 +166,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		}
 		framework.WaitForStableCluster(cs, masterNodes)

-		pods, err := cs.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
+		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
 		for _, pod := range pods.Items {
 			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
@@ -291,7 +291,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		// already when the kubelet does not know about its new label yet. The
 		// kubelet will then refuse to launch the pod.
 		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
-		labelPod, err := cs.Core().Pods(ns).Get(labelPodName, metav1.GetOptions{})
+		labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
 	})
@@ -421,7 +421,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		// already when the kubelet does not know about its new taint yet. The
 		// kubelet will then refuse to launch the pod.
 		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
-		deployedPod, err := cs.Core().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
+		deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
 	})
@@ -495,7 +495,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 }

 func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
-	pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
 	framework.ExpectNoError(err)
 	return pod
 }
@@ -503,7 +503,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 	pod := createPausePod(f, conf)
 	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
-	pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	return pod
 }
@@ -516,7 +516,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
 	pod := runPausePod(f, conf)

 	By("Explicitly delete pod here to free the resource it takes.")
-	err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
+	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
 	framework.ExpectNoError(err)

 	return pod.Spec.NodeName
@@ -691,7 +691,7 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected

 func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
 	selector := labels.SelectorFromSet(labels.Set(labelsMap))
-	allPods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
+	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
 	framework.ExpectNoError(err)
 	return allPods
 }

@@ -300,7 +300,7 @@ var _ = SIGDescribe("Volume plugin streaming [Slow]", func() {

 		AfterEach(func() {
 			framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
-			epErr := cs.Core().Endpoints(ns).Delete(name, nil)
+			epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil)
 			framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
 			err := framework.DeletePodWithWait(f, cs, serverPod)
 			if epErr != nil || err != nil {
@@ -389,7 +389,7 @@ var _ = SIGDescribe("Volume plugin streaming [Slow]", func() {
 				Type: "kubernetes.io/rbd",
 			}
 			var err error
-			secret, err = cs.Core().Secrets(ns).Create(secret)
+			secret, err = cs.CoreV1().Secrets(ns).Create(secret)
 			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("BeforeEach: failed to create secret %q for Ceph-RBD: %v", name, err))

 			volSource = v1.VolumeSource{
@@ -409,7 +409,7 @@ var _ = SIGDescribe("Volume plugin streaming [Slow]", func() {

 		AfterEach(func() {
 			framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", name)
-			secErr := cs.Core().Secrets(ns).Delete(name, &metav1.DeleteOptions{})
+			secErr := cs.CoreV1().Secrets(ns).Delete(name, &metav1.DeleteOptions{})
 			framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
 			err := framework.DeletePodWithWait(f, cs, serverPod)
 			if secErr != nil || err != nil {

@@ -141,7 +141,7 @@ var _ = SIGDescribe("Volumes", func() {
 		defer func() {
 			if clean {
 				framework.VolumeTestCleanup(f, config)
-				err := cs.Core().Endpoints(namespace.Name).Delete(name, nil)
+				err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
 				Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
 			}
 		}()
@@ -233,7 +233,7 @@ var _ = SIGDescribe("Volumes", func() {
 			Type: "kubernetes.io/rbd",
 		}

-		secClient := cs.Core().Secrets(config.Namespace)
+		secClient := cs.CoreV1().Secrets(config.Namespace)

 		defer func() {
 			if clean {
@@ -309,14 +309,14 @@ var _ = SIGDescribe("Volumes", func() {

 		defer func() {
 			if clean {
-				if err := cs.Core().Secrets(namespace.Name).Delete(secret.Name, nil); err != nil {
+				if err := cs.CoreV1().Secrets(namespace.Name).Delete(secret.Name, nil); err != nil {
 					framework.Failf("unable to delete secret %v: %v", secret.Name, err)
 				}
 			}
 		}()

 		var err error
-		if secret, err = cs.Core().Secrets(namespace.Name).Create(secret); err != nil {
+		if secret, err = cs.CoreV1().Secrets(namespace.Name).Create(secret); err != nil {
 			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
 		}

@@ -481,11 +481,11 @@ var _ = SIGDescribe("Volumes", func() {
 				"third":  "this is the third file",
 			},
 		}
-		if _, err := cs.Core().ConfigMaps(namespace.Name).Create(configMap); err != nil {
+		if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil {
 			framework.Failf("unable to create test configmap: %v", err)
 		}
 		defer func() {
-			_ = cs.Core().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)
+			_ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)
 		}()

 		// Test one ConfigMap mounted several times to test #28502

@@ -116,12 +116,12 @@ func createTestController(cs clientset.Interface, observedDeletions chan struct{
 		&cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
-				obj, err := cs.Core().Pods(ns).List(options)
+				obj, err := cs.CoreV1().Pods(ns).List(options)
 				return runtime.Object(obj), err
 			},
 			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				options.FieldSelector = fields.SelectorFromSet(fields.Set{"metadata.name": podName}).String()
-				return cs.Core().Pods(ns).Watch(options)
+				return cs.CoreV1().Pods(ns).Watch(options)
 			},
 		},
 		&v1.Pod{},

@@ -390,7 +390,7 @@ func TestSchedulerExtender(t *testing.T) {
 func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+	defer cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

 	goodCondition := v1.NodeCondition{
 		Type:              v1.NodeReady,
@@ -410,7 +410,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)

 	for ii := 0; ii < 5; ii++ {
 		node.Name = fmt.Sprintf("machine%d", ii+1)
-		if _, err := cs.Core().Nodes().Create(node); err != nil {
+		if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
 			t.Fatalf("Failed to create nodes: %v", err)
 		}
 	}
@@ -422,7 +422,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		},
 	}

-	myPod, err := cs.Core().Pods(ns.Name).Create(pod)
+	myPod, err := cs.CoreV1().Pods(ns.Name).Create(pod)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
@@ -432,17 +432,17 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		t.Fatalf("Failed to schedule pod: %v", err)
 	}

-	myPod, err = cs.Core().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
+	myPod, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod: %v", err)
 	} else if myPod.Spec.NodeName != "machine2" {
 		t.Fatalf("Failed to schedule using extender, expected machine2, got %v", myPod.Spec.NodeName)
 	}
 	var gracePeriod int64
-	if err := cs.Core().Pods(ns.Name).Delete(myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
+	if err := cs.CoreV1().Pods(ns.Name).Delete(myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
 		t.Fatalf("Failed to delete pod: %v", err)
 	}
-	_, err = cs.Core().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
+	_, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{})
 	if err == nil {
 		t.Fatalf("Failed to delete pod: %v", err)
 	}

@@ -816,7 +816,7 @@ func TestInterPodAffinity(t *testing.T) {
 			} else {
 				nsName = context.ns.Name
 			}
-			createdPod, err := cs.Core().Pods(nsName).Create(pod)
+			createdPod, err := cs.CoreV1().Pods(nsName).Create(pod)
 			if err != nil {
 				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
 			}
@@ -825,7 +825,7 @@ func TestInterPodAffinity(t *testing.T) {
 				t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
 			}
 		}
-		testPod, err := cs.Core().Pods(context.ns.Name).Create(test.pod)
+		testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod)
 		if err != nil {
 			if !(test.errorType == "invalidPod" && errors.IsInvalid(err)) {
 				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
@@ -846,7 +846,7 @@ func TestInterPodAffinity(t *testing.T) {
 				} else {
 					nsName = context.ns.Name
 				}
-				err = cs.Core().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0))
+				err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0))
 				if err != nil {
 					t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 				}
@@ -855,7 +855,7 @@ func TestInterPodAffinity(t *testing.T) {
 					t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
 				}
 			}
-			err = cs.Core().Pods(context.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
+			err = cs.CoreV1().Pods(context.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 			}

@@ -285,7 +285,7 @@ func runPausePod(cs clientset.Interface, conf *pausePodConfig) (*v1.Pod, error)
 // podDeleted returns true if a pod is not found in the given namespace.
 func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
-		_, err := c.Core().Pods(podNamespace).Get(podName, metav1.GetOptions{})
+		_, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return true, nil
 		}