Mirror of https://github.com/k3s-io/kubernetes.git (synced 2026-01-05 15:37:24 +00:00)
use core client with explicit version
fix more usage of deprecated core client
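The pattern applied throughout this diff replaces the deprecated, unversioned Core() accessor on the generated clientset with the explicit CoreV1() accessor. A minimal sketch of the migration, assuming a typed client-go clientset of the same vintage as this commit, where the typed Pod calls take only ListOptions/GetOptions (newer client-go releases also require a context.Context as the first argument); the file and function names here are illustrative only:

    // example.go (illustrative only)
    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func listPods(c kubernetes.Interface, ns string) error {
        // Deprecated accessor: Core() aliased the v1 core group without naming the version.
        // _, err := c.Core().Pods(ns).List(metav1.ListOptions{})

        // Preferred accessor: CoreV1() names the API version explicitly.
        _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
        return err
    }

Core() was a deprecated alias that returned the same CoreV1 interface, so the substitution below is purely mechanical and applies identically to the Pods, Services, Nodes, and RESTClient call sites.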
@@ -131,7 +131,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
 
     It("should forbid pod creation when no PSP is available", func() {
         By("Running a restricted pod")
-        _, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted"))
+        _, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
         expectForbidden(err)
     })
 
@@ -141,12 +141,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
         defer cleanup()
 
         By("Running a restricted pod")
-        pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed"))
+        pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
         framework.ExpectNoError(err)
         framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
 
         testPrivilegedPods(f, func(pod *v1.Pod) {
-            _, err := c.Core().Pods(ns).Create(pod)
+            _, err := c.CoreV1().Pods(ns).Create(pod)
             expectForbidden(err)
         })
     })
@@ -160,12 +160,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
         defer cleanup()
 
         testPrivilegedPods(f, func(pod *v1.Pod) {
-            p, err := c.Core().Pods(ns).Create(pod)
+            p, err := c.CoreV1().Pods(ns).Create(pod)
             framework.ExpectNoError(err)
             framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
 
             // Verify expected PSP was used.
-            p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{})
+            p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
             framework.ExpectNoError(err)
             validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
             Expect(found).To(BeTrue(), "PSP annotation not found")
@@ -286,7 +286,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
         return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
     }
     podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-        return client.Core().Pods(namespace).List(options)
+        return client.CoreV1().Pods(namespace).List(options)
     }
     rsList := []*extensions.ReplicaSet{replicaSet}
     podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
@@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
     // Check for the existence of the Kibana service.
     ginkgo.By("Checking the Kibana service exists.")
-    s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
+    s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem)
     // Make a few attempts to connect. This makes the test robust against
     // being run as the first e2e test just after the e2e cluster has been created.
    err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
@@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
    ginkgo.By("Checking to make sure the Kibana pods are running")
    label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
    options := metav1.ListOptions{LabelSelector: label.String()}
-   pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
+   pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    for _, pod := range pods.Items {
        err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
    ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
    err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
-       req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+       req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
        if err != nil {
            framework.Logf("Failed to get services proxy request: %v", err)
            return false, nil
@@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error {
    f := p.Framework
    // Check for the existence of the Elasticsearch service.
    framework.Logf("Checking the Elasticsearch service exists.")
-   s := f.ClientSet.Core().Services(api.NamespaceSystem)
+   s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
    // Make a few attempts to connect. This makes the test robust against
    // being run as the first e2e test just after the e2e cluster has been created.
    var err error
@@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error {
    framework.Logf("Checking to make sure the Elasticsearch pods are running")
    labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
    options := meta_v1.ListOptions{LabelSelector: labelSelector}
-   pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
+   pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
    if err != nil {
        return err
    }
@@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error {
    err = nil
    var body []byte
    for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-       proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+       proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
        if errProxy != nil {
            framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
            continue
@@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error {
    framework.Logf("Checking health of Elasticsearch service.")
    healthy := false
    for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-       proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+       proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
        if errProxy != nil {
            framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
            continue
@@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() {
 func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
    f := p.Framework
 
-   proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+   proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
    if errProxy != nil {
        framework.Logf("Failed to get services proxy request: %v", errProxy)
        return nil
@@ -433,7 +433,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
    By("Creating pods for each static PV")
    for _, config := range configs {
        podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
-       config.pod, err = c.Core().Pods(ns).Create(podConfig)
+       config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
        Expect(err).NotTo(HaveOccurred())
    }
 
@@ -813,7 +813,7 @@ var _ = SIGDescribe("Services", func() {
        tcpService := jig.CreateTCPServiceOrFail(ns, nil)
        defer func() {
            framework.Logf("Cleaning up the updating NodePorts test service")
-           err := cs.Core().Services(ns).Delete(serviceName, nil)
+           err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
            Expect(err).NotTo(HaveOccurred())
        }()
        jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
@@ -119,7 +119,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
 }
 
 func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
-   nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
+   nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
    if err != nil {
        return false, err
    }
@@ -139,7 +139,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
 func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
    selector := labels.Set(labelSet).AsSelector()
    options := metav1.ListOptions{LabelSelector: selector.String()}
-   podList, err := f.ClientSet.Core().Pods(namespace).List(options)
+   podList, err := f.ClientSet.CoreV1().Pods(namespace).List(options)
    if err != nil {
        return false, err
    }
@@ -66,7 +66,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
    t.tester.PauseNewPods(t.set)
 
    By("Creating service " + headlessSvcName + " in namespace " + ns)
-   _, err := f.ClientSet.Core().Services(ns).Create(t.service)
+   _, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
    Expect(err).NotTo(HaveOccurred())
 
    By("Creating statefulset " + ssName + " in namespace " + ns)
@@ -210,7 +210,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str
 
 func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
    addPodConditionReady(pod, metav1.Now())
-   _, err := c.Core().Pods(ns).UpdateStatus(pod)
+   _, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
    return err
 }