e2e_kubeadm: clean up non-recommended import

Author: SataQiu, 2019-07-26 12:02:26 +08:00
parent cc0137cdc6
commit fc2d6aea48
9 changed files with 95 additions and 95 deletions
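
The change applied in every file below is the same: the dot-imports of ginkgo and gomega are replaced with plain imports, and every call site is qualified with its package name (It becomes ginkgo.It, Expect becomes gomega.Expect, and so on); golint reports dot imports as "should not use dot imports". A minimal sketch of the pattern, using a hypothetical spec that is not part of this commit:

package example

import (
	// Previously these were dot-imports (`. "github.com/onsi/ginkgo"`),
	// which put every exported Ginkgo/Gomega identifier into package scope.
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With plain imports, each identifier is package-qualified at the call site.
var _ = ginkgo.Describe("example", func() {
	ginkgo.It("adds numbers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})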

View File

@@ -22,8 +22,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -46,7 +46,7 @@ var _ = KubeadmDescribe("bootstrap token", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-It("should exist and be properly configured", func() {
+ginkgo.It("should exist and be properly configured", func() {
secrets, err := f.ClientSet.CoreV1().
Secrets(kubeSystemNamespace).
List(metav1.ListOptions{})
@@ -59,10 +59,10 @@ var _ = KubeadmDescribe("bootstrap token", func() {
tokenNum++
}
}
-Expect(tokenNum).Should(BeNumerically(">", 0), "At least one bootstrap token should exist")
+gomega.Expect(tokenNum).Should(gomega.BeNumerically(">", 0), "At least one bootstrap token should exist")
})
-It("should be allowed to post CSR for kubelet certificates on joining nodes", func() {
+ginkgo.It("should be allowed to post CSR for kubelet certificates on joining nodes", func() {
ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet,
bootstrapTokensAllowPostCSRClusterRoleBinding,
rbacv1.GroupKind, bootstrapTokensGroup,
@@ -71,7 +71,7 @@ var _ = KubeadmDescribe("bootstrap token", func() {
//TODO: check if possible to verify "allowed to post CSR" using subject asses review as well
})
-It("should be allowed to auto approve CSR for kubelet certificates on joining nodes", func() {
+ginkgo.It("should be allowed to auto approve CSR for kubelet certificates on joining nodes", func() {
ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet,
bootstrapTokensCSRAutoApprovalClusterRoleBinding,
rbacv1.GroupKind, bootstrapTokensGroup,

View File

@@ -22,8 +22,8 @@ import (
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -53,23 +53,23 @@ var _ = KubeadmDescribe("cluster-info ConfigMap", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-It("should exist and be properly configured", func() {
+ginkgo.It("should exist and be properly configured", func() {
// Nb. this is technically implemented a part of the bootstrap-token phase
cm := GetConfigMap(f.ClientSet, kubePublicNamespace, clusterInfoConfigMapName)
-Expect(cm.Data).To(HaveKey(HavePrefix(bootstrapapi.JWSSignatureKeyPrefix)))
-Expect(cm.Data).To(HaveKey(bootstrapapi.KubeConfigKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(gomega.HavePrefix(bootstrapapi.JWSSignatureKeyPrefix)))
+gomega.Expect(cm.Data).To(gomega.HaveKey(bootstrapapi.KubeConfigKey))
//TODO: What else? server?
})
-It("should have related Role and RoleBinding", func() {
+ginkgo.It("should have related Role and RoleBinding", func() {
// Nb. this is technically implemented a part of the bootstrap-token phase
ExpectRole(f.ClientSet, kubePublicNamespace, clusterInfoRoleName)
ExpectRoleBinding(f.ClientSet, kubePublicNamespace, clusterInfoRoleBindingName)
})
-It("should be accessible for anonymous", func() {
+ginkgo.It("should be accessible for anonymous", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.UserKind, anonymousUser,
clusterInfoConfigMapResource,

View File

@@ -23,8 +23,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -44,12 +44,12 @@ var _ = KubeadmDescribe("control-plane node", func() {
// Important! please note that this test can't be run on single-node clusters
// in case you can skip this test with SKIP=multi-node
-It("should be labelled and tainted [multi-node]", func() {
+ginkgo.It("should be labelled and tainted [multi-node]", func() {
// get all control-plane nodes (and this implicitly checks that node are properly labeled)
controlPlanes := getControlPlaneNodes(f.ClientSet)
// checks if there is at least one control-plane node
-Expect(controlPlanes.Items).NotTo(BeEmpty(), "at least one node with label %s should exist. if you are running test on a single-node cluster, you can skip this test with SKIP=multi-node", controlPlaneTaint)
+gomega.Expect(controlPlanes.Items).NotTo(gomega.BeEmpty(), "at least one node with label %s should exist. if you are running test on a single-node cluster, you can skip this test with SKIP=multi-node", controlPlaneTaint)
// checks that the control-plane nodes have the expected taint
for _, cp := range controlPlanes.Items {

View File

@@ -19,8 +19,8 @@ package e2e_kubeadm
import (
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -54,7 +54,7 @@ var _ = KubeadmDescribe("DNS addon", func() {
// kubeadm supports two type of DNS addon, and so
// it is necessary to get it from the kubeadm-config ConfigMap before testing
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
// if the dnsType name is already known exit
if dnsType != "" {
return
@@ -73,9 +73,9 @@ var _ = KubeadmDescribe("DNS addon", func() {
}
})
-Context("kube-dns", func() {
-Context("kube-dns ServiceAccount", func() {
-It("should exist", func() {
+ginkgo.Context("kube-dns", func() {
+ginkgo.Context("kube-dns ServiceAccount", func() {
+ginkgo.It("should exist", func() {
if dnsType != "kube-dns" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
@@ -84,22 +84,22 @@ var _ = KubeadmDescribe("DNS addon", func() {
})
})
-Context("kube-dns Deployment", func() {
-It("should exist and be properly configured", func() {
+ginkgo.Context("kube-dns Deployment", func() {
+ginkgo.It("should exist and be properly configured", func() {
if dnsType != "kube-dns" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
d := GetDeployment(f.ClientSet, kubeSystemNamespace, kubeDNSDeploymentName)
-Expect(d.Spec.Template.Spec.ServiceAccountName).To(Equal(kubeDNSServiceAccountName))
+gomega.Expect(d.Spec.Template.Spec.ServiceAccountName).To(gomega.Equal(kubeDNSServiceAccountName))
})
})
})
-Context("CoreDNS", func() {
-Context("CoreDNS ServiceAccount", func() {
-It("should exist", func() {
+ginkgo.Context("CoreDNS", func() {
+ginkgo.Context("CoreDNS ServiceAccount", func() {
+ginkgo.It("should exist", func() {
if dnsType != "CoreDNS" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
@@ -107,7 +107,7 @@ var _ = KubeadmDescribe("DNS addon", func() {
ExpectServiceAccount(f.ClientSet, kubeSystemNamespace, coreDNSServiceAccountName)
})
-It("should have related ClusterRole and ClusterRoleBinding", func() {
+ginkgo.It("should have related ClusterRole and ClusterRoleBinding", func() {
if dnsType != "CoreDNS" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
@@ -117,33 +117,33 @@ var _ = KubeadmDescribe("DNS addon", func() {
})
})
-Context("CoreDNS ConfigMap", func() {
-It("should exist and be properly configured", func() {
+ginkgo.Context("CoreDNS ConfigMap", func() {
+ginkgo.It("should exist and be properly configured", func() {
if dnsType != "CoreDNS" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, coreDNSConfigMap)
-Expect(cm.Data).To(HaveKey(coreDNSConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(coreDNSConfigMapKey))
})
})
-Context("CoreDNS Deployment", func() {
-It("should exist and be properly configured", func() {
+ginkgo.Context("CoreDNS Deployment", func() {
+ginkgo.It("should exist and be properly configured", func() {
if dnsType != "CoreDNS" {
framework.Skipf("Skipping because DNS type is %s", dnsType)
}
d := GetDeployment(f.ClientSet, kubeSystemNamespace, coreDNSDeploymentName)
-Expect(d.Spec.Template.Spec.ServiceAccountName).To(Equal(coreDNSServiceAccountName))
+gomega.Expect(d.Spec.Template.Spec.ServiceAccountName).To(gomega.Equal(coreDNSServiceAccountName))
})
})
})
-Context("DNS Service", func() {
-It("should exist", func() {
+ginkgo.Context("DNS Service", func() {
+ginkgo.It("should exist", func() {
ExpectService(f.ClientSet, kubeSystemNamespace, dnsService)
})
})

View File

@@ -24,8 +24,8 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -59,18 +59,18 @@ var _ = KubeadmDescribe("kubeadm-certs [copy-certs]", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-It("should exist and be properly configured", func() {
+ginkgo.It("should exist and be properly configured", func() {
s := GetSecret(f.ClientSet, kubeSystemNamespace, kubeadmCertsSecretName)
// Checks the kubeadm-certs is ownen by a time lived token
-Expect(s.OwnerReferences).To(HaveLen(1), "%s should have one owner reference", kubeadmCertsSecretName)
+gomega.Expect(s.OwnerReferences).To(gomega.HaveLen(1), "%s should have one owner reference", kubeadmCertsSecretName)
ownRef := s.OwnerReferences[0]
-Expect(ownRef.Kind).To(Equal("Secret"), "%s should be owned by a secret", kubeadmCertsSecretName)
-Expect(*ownRef.BlockOwnerDeletion).To(BeTrue(), "%s should be deleted on owner deletion", kubeadmCertsSecretName)
+gomega.Expect(ownRef.Kind).To(gomega.Equal("Secret"), "%s should be owned by a secret", kubeadmCertsSecretName)
+gomega.Expect(*ownRef.BlockOwnerDeletion).To(gomega.BeTrue(), "%s should be deleted on owner deletion", kubeadmCertsSecretName)
o := GetSecret(f.ClientSet, kubeSystemNamespace, ownRef.Name)
-Expect(o.Type).To(Equal(corev1.SecretTypeBootstrapToken), "%s should have an owner reference that refers to a bootstrap-token", kubeadmCertsSecretName)
-Expect(o.Data).To(HaveKey("expiration"), "%s should have an owner reference with an expiration", kubeadmCertsSecretName)
+gomega.Expect(o.Type).To(gomega.Equal(corev1.SecretTypeBootstrapToken), "%s should have an owner reference that refers to a bootstrap-token", kubeadmCertsSecretName)
+gomega.Expect(o.Data).To(gomega.HaveKey("expiration"), "%s should have an owner reference with an expiration", kubeadmCertsSecretName)
// gets the ClusterConfiguration from the kubeadm kubeadm-config ConfigMap as a untyped map
m := getClusterConfiguration(f.ClientSet)
@@ -85,29 +85,29 @@ var _ = KubeadmDescribe("kubeadm-certs [copy-certs]", func() {
}
// check if all the expected key exists
-Expect(s.Data).To(HaveKey("ca.crt"))
-Expect(s.Data).To(HaveKey("ca.key"))
-Expect(s.Data).To(HaveKey("front-proxy-ca.crt"))
-Expect(s.Data).To(HaveKey("front-proxy-ca.key"))
-Expect(s.Data).To(HaveKey("sa.pub"))
-Expect(s.Data).To(HaveKey("sa.key"))
+gomega.Expect(s.Data).To(gomega.HaveKey("ca.crt"))
+gomega.Expect(s.Data).To(gomega.HaveKey("ca.key"))
+gomega.Expect(s.Data).To(gomega.HaveKey("front-proxy-ca.crt"))
+gomega.Expect(s.Data).To(gomega.HaveKey("front-proxy-ca.key"))
+gomega.Expect(s.Data).To(gomega.HaveKey("sa.pub"))
+gomega.Expect(s.Data).To(gomega.HaveKey("sa.key"))
if etcdType == "local" {
-Expect(s.Data).To(HaveKey("etcd-ca.crt"))
-Expect(s.Data).To(HaveKey("etcd-ca.key"))
+gomega.Expect(s.Data).To(gomega.HaveKey("etcd-ca.crt"))
+gomega.Expect(s.Data).To(gomega.HaveKey("etcd-ca.key"))
} else {
-Expect(s.Data).To(HaveKey("external-etcd-ca.crt"))
-Expect(s.Data).To(HaveKey("external-etcd.crt"))
-Expect(s.Data).To(HaveKey("external-etcd.key"))
+gomega.Expect(s.Data).To(gomega.HaveKey("external-etcd-ca.crt"))
+gomega.Expect(s.Data).To(gomega.HaveKey("external-etcd.crt"))
+gomega.Expect(s.Data).To(gomega.HaveKey("external-etcd.key"))
}
})
-It("should have related Role and RoleBinding", func() {
+ginkgo.It("should have related Role and RoleBinding", func() {
ExpectRole(f.ClientSet, kubeSystemNamespace, kubeadmCertsRoleName)
ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeadmCertsRoleBindingName)
})
-It("should be accessible for bootstrap tokens", func() {
+ginkgo.It("should be accessible for bootstrap tokens", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, bootstrapTokensGroup,
kubeadmCertsSecretResource,

View File

@@ -24,8 +24,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -57,11 +57,11 @@ var _ = KubeadmDescribe("kubeadm-config ConfigMap", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-It("should exist and be properly configured", func() {
+ginkgo.It("should exist and be properly configured", func() {
cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeadmConfigName)
-Expect(cm.Data).To(HaveKey(kubeadmConfigClusterConfigurationConfigMapKey))
-Expect(cm.Data).To(HaveKey(kubeadmConfigClusterStatusConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(kubeadmConfigClusterConfigurationConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(kubeadmConfigClusterStatusConfigMapKey))
m := unmarshalYaml(cm.Data[kubeadmConfigClusterStatusConfigMapKey])
if _, ok := m["apiEndpoints"]; ok {
@@ -80,19 +80,19 @@ var _ = KubeadmDescribe("kubeadm-config ConfigMap", func() {
}
})
-It("should have related Role and RoleBinding", func() {
+ginkgo.It("should have related Role and RoleBinding", func() {
ExpectRole(f.ClientSet, kubeSystemNamespace, kubeadmConfigRoleName)
ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeadmConfigRoleBindingName)
})
-It("should be accessible for bootstrap tokens", func() {
+ginkgo.It("should be accessible for bootstrap tokens", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, bootstrapTokensGroup,
kubeadmConfigConfigMapResource,
)
})
-It("should be accessible for for nodes", func() {
+ginkgo.It("should be accessible for for nodes", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, nodesGroup,
kubeadmConfigConfigMapResource,
@@ -103,7 +103,7 @@ var _ = KubeadmDescribe("kubeadm-config ConfigMap", func() {
func getClusterConfiguration(c clientset.Interface) map[interface{}]interface{} {
cm := GetConfigMap(c, kubeSystemNamespace, kubeadmConfigName)
-Expect(cm.Data).To(HaveKey(kubeadmConfigClusterConfigurationConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(kubeadmConfigClusterConfigurationConfigMapKey))
return unmarshalYaml(cm.Data[kubeadmConfigClusterConfigurationConfigMapKey])
}

View File

@@ -25,8 +25,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -60,7 +60,7 @@ var _ = KubeadmDescribe("kubelet-config ConfigMap", func() {
// kubelet-config map is named using the kubernetesVersion as a suffix, and so
// it is necessary to get it from the kubeadm-config ConfigMap before testing
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
// if the kubelet-config map name is already known exit
if kubeletConfigConfigMapName != "" {
return
@@ -70,7 +70,7 @@ var _ = KubeadmDescribe("kubelet-config ConfigMap", func() {
m := getClusterConfiguration(f.ClientSet)
// Extract the kubernetesVersion
-Expect(m).To(HaveKey("kubernetesVersion"))
+gomega.Expect(m).To(gomega.HaveKey("kubernetesVersion"))
k8sVersionString := m["kubernetesVersion"].(string)
k8sVersion, err := version.ParseSemantic(k8sVersionString)
if err != nil {
@@ -84,25 +84,25 @@ var _ = KubeadmDescribe("kubelet-config ConfigMap", func() {
kubeletConfigConfigMapResource.Name = kubeletConfigConfigMapName
})
-It("should exist and be properly configured", func() {
+ginkgo.It("should exist and be properly configured", func() {
cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeletConfigConfigMapName)
-Expect(cm.Data).To(HaveKey(kubeletConfigConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(kubeletConfigConfigMapKey))
})
-It("should have related Role and RoleBinding", func() {
+ginkgo.It("should have related Role and RoleBinding", func() {
ExpectRole(f.ClientSet, kubeSystemNamespace, kubeletConfigRoleName)
ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeletConfigRoleBindingName)
})
-It("should be accessible for bootstrap tokens", func() {
+ginkgo.It("should be accessible for bootstrap tokens", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, bootstrapTokensGroup,
kubeadmConfigConfigMapResource,
)
})
-It("should be accessible for nodes", func() {
+ginkgo.It("should be accessible for nodes", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, nodesGroup,
kubeadmConfigConfigMapResource,

View File

@@ -21,8 +21,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -43,18 +43,18 @@ var _ = KubeadmDescribe("nodes", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-It("should have CRI annotation", func() {
+ginkgo.It("should have CRI annotation", func() {
nodes, err := f.ClientSet.CoreV1().Nodes().
List(metav1.ListOptions{})
framework.ExpectNoError(err, "error reading nodes")
// checks that the nodes have the CRI annotation
for _, node := range nodes.Items {
-Expect(node.Annotations).To(HaveKey(nodesCRISocketAnnotation))
+gomega.Expect(node.Annotations).To(gomega.HaveKey(nodesCRISocketAnnotation))
}
})
-It("should be allowed to rotate CSR", func() {
+ginkgo.It("should be allowed to rotate CSR", func() {
// Nb. this is technically implemented a part of the bootstrap-token phase
ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet,
nodesCertificateRotationClusterRoleBinding,

View File

@@ -21,8 +21,8 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/kubernetes/test/e2e/framework"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)
const (
@@ -56,12 +56,12 @@ var _ = KubeadmDescribe("proxy addon", func() {
// so we are disabling the creation of a namespace in order to get a faster execution
f.SkipNamespaceCreation = true
-Context("kube-proxy ServiceAccount", func() {
-It("should exist", func() {
+ginkgo.Context("kube-proxy ServiceAccount", func() {
+ginkgo.It("should exist", func() {
ExpectServiceAccount(f.ClientSet, kubeSystemNamespace, kubeProxyServiceAccountName)
})
-It("should be binded to the system:node-proxier cluster role", func() {
+ginkgo.It("should be binded to the system:node-proxier cluster role", func() {
ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet,
kubeProxyClusterRoleBindingName,
rbacv1.ServiceAccountKind, kubeProxyServiceAccountName,
@@ -70,19 +70,19 @@ var _ = KubeadmDescribe("proxy addon", func() {
})
})
-Context("kube-proxy ConfigMap", func() {
-It("should exist and be properly configured", func() {
+ginkgo.Context("kube-proxy ConfigMap", func() {
+ginkgo.It("should exist and be properly configured", func() {
cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeProxyConfigMap)
-Expect(cm.Data).To(HaveKey(kubeProxyConfigMapKey))
+gomega.Expect(cm.Data).To(gomega.HaveKey(kubeProxyConfigMapKey))
})
-It("should have related Role and RoleBinding", func() {
+ginkgo.It("should have related Role and RoleBinding", func() {
ExpectRole(f.ClientSet, kubeSystemNamespace, kubeProxyRoleName)
ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeProxyRoleBindingName)
})
-It("should be accessible by bootstrap tokens", func() {
+ginkgo.It("should be accessible by bootstrap tokens", func() {
ExpectSubjectHasAccessToResource(f.ClientSet,
rbacv1.GroupKind, bootstrapTokensGroup,
kubeProxyConfigMapResource,
@@ -90,11 +90,11 @@ var _ = KubeadmDescribe("proxy addon", func() {
})
})
-Context("kube-proxy DaemonSet", func() {
-It("should exist and be properly configured", func() {
+ginkgo.Context("kube-proxy DaemonSet", func() {
+ginkgo.It("should exist and be properly configured", func() {
ds := GetDaemonSet(f.ClientSet, kubeSystemNamespace, kubeProxyDaemonSetName)
-Expect(ds.Spec.Template.Spec.ServiceAccountName).To(Equal(kubeProxyServiceAccountName))
+gomega.Expect(ds.Spec.Template.Spec.ServiceAccountName).To(gomega.Equal(kubeProxyServiceAccountName))
})
})
})