mirror of https://github.com/k3s-io/kubernetes.git
test/e2e
@@ -36,7 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/fields"
@@ -60,15 +60,15 @@ const (
 type Framework struct {
 	BaseName string

-	// ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
-	ClientSet internalclientset.Interface
+	// ClientSet uses internal objects, you should use ClientSet where possible.
+	ClientSet clientset.Interface

-	ClientSet_1_5 *release_1_5.Clientset
-	StagingClient *staging.Clientset
-	ClientPool    dynamic.ClientPool
+	InternalClientset *internalclientset.Clientset
+	StagingClient     *staging.Clientset
+	ClientPool        dynamic.ClientPool

-	Namespace          *api.Namespace   // Every test has at least one namespace
-	namespacesToDelete []*api.Namespace // Some tests have more than one.
+	Namespace          *v1.Namespace   // Every test has at least one namespace
+	namespacesToDelete []*v1.Namespace // Some tests have more than one.
 	NamespaceDeletionTimeout time.Duration

 	gatherer *containerResourceGatherer
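With this hunk, f.ClientSet serves the versioned v1 API while the new InternalClientset field keeps internal objects reachable during the migration. A minimal sketch of the resulting call pattern (illustrative only, not part of this commit; it uses the 1.5-era import paths and methods that appear in this diff):

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// listRunningPods shows the versioned types end to end: the list call takes
// v1.ListOptions and the items come back as v1.Pod values, not api.Pod.
func listRunningPods(c clientset.Interface, ns string) ([]v1.Pod, error) {
	list, err := c.Core().Pods(ns).List(v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	running := []v1.Pod{}
	for _, p := range list.Items {
		if p.Status.Phase == v1.PodRunning {
			running = append(running, p)
		}
	}
	return running, nil
}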
@@ -130,7 +130,7 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G
 	return f
 }

-func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework {
+func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
 	f := &Framework{
 		BaseName:                 baseName,
 		AddonResourceConstraints: make(map[string]ResourceConstraint),
@@ -193,9 +193,9 @@ func (f *Framework) BeforeEach() {
 		if TestContext.KubeAPIContentType != "" {
 			config.ContentType = TestContext.KubeAPIContentType
 		}
-		f.ClientSet, err = internalclientset.NewForConfig(config)
+		f.ClientSet, err = clientset.NewForConfig(config)
 		Expect(err).NotTo(HaveOccurred())
-		f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
+		f.InternalClientset, err = internalclientset.NewForConfig(config)
 		Expect(err).NotTo(HaveOccurred())
 		clientRepoConfig := getClientRepoConfig(config)
 		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
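Both clientsets are built from the same restclient.Config, so one BeforeEach can serve tests in either world until the internal client is fully removed. A hypothetical helper capturing the wiring above (buildClients is an invented name, not from this commit):

package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// buildClients mirrors the BeforeEach wiring: the versioned clientset becomes
// the primary client, the internal one stays around for remaining callers.
func buildClients(config *restclient.Config) (clientset.Interface, internalclientset.Interface, error) {
	versioned, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	internal, err := internalclientset.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	return versioned, internal, nil
}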
@@ -369,7 +369,7 @@ func (f *Framework) AfterEach() {
 	// Print events if the test failed.
 	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
-		// Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
-		DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name)
+		DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
 		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
 		LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
 		if f.federated {
@@ -439,7 +439,7 @@ func (f *Framework) AfterEach() {
 	}
 }

-func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) {
+func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
 	createTestingNS := TestContext.CreateTestingNS
 	if createTestingNS == nil {
 		createTestingNS = CreateTestingNS
@@ -507,14 +507,14 @@ func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
 // TestContainerOutput runs the given pod in the given namespace and waits
 // for all of the containers in the podSpec to move into the 'Success' status, and tests
 // the specified container log against the given expected output using a substring matcher.
-func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) {
+func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
 	f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
 }

 // TestContainerOutputRegexp runs the given pod in the given namespace and waits
 // for all of the containers in the podSpec to move into the 'Success' status, and tests
 // the specified container log against the given expected output using a regexp matcher.
-func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) {
+func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
 	f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
 }

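A hypothetical caller of the new signature; the pod name, image, and expected string are invented for illustration, and only TestContainerOutput and the v1 types come from this diff:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkEchoOutput runs a one-shot pod and matches container 0's log output
// by substring, per the comment on TestContainerOutput above.
func checkEchoOutput(f *framework.Framework) {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "echo-test"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "echo",
				Image:   "gcr.io/google_containers/busybox:1.24", // illustrative image
				Command: []string{"sh", "-c", "echo hello"},
			}},
		},
	}
	f.TestContainerOutput("substring match on echo", pod, 0, []string{"hello"})
}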
@@ -524,13 +524,13 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
 	for {
 		// TODO: Endpoints client should take a field selector so we
 		// don't have to list everything.
-		list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{})
+		list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(v1.ListOptions{})
 		if err != nil {
 			return err
 		}
 		rv := list.ResourceVersion

-		isOK := func(e *api.Endpoints) bool {
+		isOK := func(e *v1.Endpoints) bool {
 			return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0
 		}
 		for i := range list.Items {
@@ -539,8 +539,8 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
 			}
 		}

-		options := api.ListOptions{
-			FieldSelector:   fields.Set{"metadata.name": serviceName}.AsSelector(),
+		options := v1.ListOptions{
+			FieldSelector:   fields.Set{"metadata.name": serviceName}.AsSelector().String(),
 			ResourceVersion: rv,
 		}
 		w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
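The substance of this hunk is the FieldSelector type change: the internal api.ListOptions held a fields.Selector, while the versioned v1.ListOptions carries the selector as a string, hence the trailing .String(). The conversion in isolation, as a minimal sketch (the helper name is invented):

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/fields"
)

// nameSelectorOptions builds versioned list options that match a single
// object by metadata.name, resuming a watch from a known resource version.
func nameSelectorOptions(name, resourceVersion string) v1.ListOptions {
	return v1.ListOptions{
		FieldSelector:   fields.Set{"metadata.name": name}.AsSelector().String(),
		ResourceVersion: resourceVersion,
	}
}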
@@ -555,7 +555,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
 				// reget and re-watch
 				break
 			}
-			if e, ok := val.Object.(*api.Endpoints); ok {
+			if e, ok := val.Object.(*v1.Endpoints); ok {
 				if isOK(e) {
 					return nil
 				}
@@ -604,7 +604,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
 }

 // CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
-func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n api.Node) api.PodSpec, count int, block bool) (error, *api.Service) {
+func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
 	var err error = nil
 	theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
 	f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
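A hypothetical call into this wrapper with its new v1 signature; the app name, image, and ports are invented, and note that the function still returns (error, *v1.Service) in that order:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// startDemoApp creates one pod per node (up to 2) plus a service in front,
// blocking until the pods are up.
func startDemoApp(f *framework.Framework) (*v1.Service, error) {
	podSpec := func(n v1.Node) v1.PodSpec {
		return v1.PodSpec{
			NodeName: n.Name,
			Containers: []v1.Container{{
				Name:  "demo",
				Image: "gcr.io/google_containers/pause-amd64:3.0", // illustrative image
			}},
		}
	}
	err, svc := f.CreateServiceForSimpleAppWithPods(8080, 80, "demo", podSpec, 2, true)
	return svc, err
}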
@@ -615,7 +615,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int,
 }

 // CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
-func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *api.Service {
+func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
 	if appName == "" {
 		panic(fmt.Sprintf("no app name provided"))
 	}
@@ -625,11 +625,11 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 	}

 	// For convenience, user sending ports are optional.
-	portsFunc := func() []api.ServicePort {
+	portsFunc := func() []v1.ServicePort {
 		if contPort < 1 || svcPort < 1 {
 			return nil
 		} else {
-			return []api.ServicePort{{
+			return []v1.ServicePort{{
 				Protocol:   "TCP",
 				Port:       int32(svcPort),
 				TargetPort: intstr.FromInt(contPort),
@@ -637,14 +637,14 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 		}
 	}
 	Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
-	service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
-		ObjectMeta: api.ObjectMeta{
+	service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "service-for-" + appName,
 			Labels: map[string]string{
 				"app": appName + "-service",
 			},
 		},
-		Spec: api.ServiceSpec{
+		Spec: v1.ServiceSpec{
 			Ports:    portsFunc(),
 			Selector: serviceSelector,
 		},
@@ -654,7 +654,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 }

 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
-func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
+func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
 	labels := map[string]string{
 		"app": appName + "-pod",
@@ -663,8 +663,8 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 		// one per node, but no more than maxCount.
 		if i <= maxCount {
 			Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
-			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{
-				ObjectMeta: api.ObjectMeta{
+			_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&v1.Pod{
+				ObjectMeta: v1.ObjectMeta{
 					Name:   fmt.Sprintf(appName+"-pod-%v", i),
 					Labels: labels,
 				},
@@ -834,22 +834,22 @@ type PodStateVerification struct {
 	Selectors map[string]string

 	// Required: The phases which are valid for your pod.
-	ValidPhases []api.PodPhase
+	ValidPhases []v1.PodPhase

 	// Optional: only pods passing this function will pass the filter
 	// Verify a pod.
 	// As an optimization, in addition to specfying filter (boolean),
 	// this function allows specifying an error as well.
 	// The error indicates that the polling of the pod spectrum should stop.
-	Verify func(api.Pod) (bool, error)
+	Verify func(v1.Pod) (bool, error)

 	// Optional: only pods with this name will pass the filter.
 	PodName string
 }

 type ClusterVerification struct {
-	client    internalclientset.Interface
-	namespace *api.Namespace // pointer rather than string, since ns isn't created until before each.
+	client    clientset.Interface
+	namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
 	podState  PodStateVerification
 }

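For context, a sketch of filling in the migrated struct; the selector and phase values are invented, while the field names and v1 types are exactly the ones defined above:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// demoPodState accepts running or succeeded pods labeled app=demo-pod.
func demoPodState() framework.PodStateVerification {
	return framework.PodStateVerification{
		Selectors:   map[string]string{"app": "demo-pod"},
		ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
		// Verify now receives a v1.Pod; a non-nil error stops the polling.
		Verify: func(p v1.Pod) (bool, error) {
			return len(p.Status.ContainerStatuses) > 0, nil
		},
	}
}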
@@ -861,11 +861,11 @@ func (f *Framework) NewClusterVerification(filter PodStateVerification) *Cluster
 	}
 }

-func passesPodNameFilter(pod api.Pod, name string) bool {
+func passesPodNameFilter(pod v1.Pod, name string) bool {
 	return name == "" || strings.Contains(pod.Name, name)
 }

-func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool, error) {
+func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
 	if verify == nil {
 		return true, nil
 	} else {
@@ -879,7 +879,7 @@ func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool
 	}
 }

-func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
+func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
 	passesPhaseFilter := false
 	for _, phase := range validPhases {
 		if pod.Status.Phase == phase {
@@ -890,18 +890,18 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
 }

 // filterLabels returns a list of pods which have labels.
-func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) {
+func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
 	var err error
 	var selector labels.Selector
-	var pl *api.PodList
+	var pl *v1.PodList
 	// List pods based on selectors. This might be a tiny optimization rather then filtering
 	// everything manually.
 	if len(selectors) > 0 {
 		selector = labels.SelectorFromSet(labels.Set(selectors))
-		options := api.ListOptions{LabelSelector: selector}
+		options := v1.ListOptions{LabelSelector: selector.String()}
 		pl, err = cli.Core().Pods(ns).List(options)
 	} else {
-		pl, err = cli.Core().Pods(ns).List(api.ListOptions{})
+		pl, err = cli.Core().Pods(ns).List(v1.ListOptions{})
 	}
 	return pl, err
 }
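Label selectors get the same treatment as the field selector earlier in this diff: the versioned v1.ListOptions wants the rendered string. A minimal sketch of just the conversion (the helper name is invented):

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
)

// labelSelectorOptions converts a plain map into versioned list options;
// an empty map means "list everything", matching the else branch above.
func labelSelectorOptions(selectors map[string]string) v1.ListOptions {
	if len(selectors) == 0 {
		return v1.ListOptions{}
	}
	sel := labels.SelectorFromSet(labels.Set(selectors))
	return v1.ListOptions{LabelSelector: sel.String()}
}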
@@ -909,20 +909,20 @@ func filterLabels(selectors map[string]string, cli internalclientset.Interface,
 // filter filters pods which pass a filter. It can be used to compose
 // the more useful abstractions like ForEach, WaitFor, and so on, which
 // can be used directly by tests.
-func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) {
+func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
 	if len(p.ValidPhases) == 0 || namespace == nil {
 		panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
 	}

 	ns := namespace.Name
-	pl, err := filterLabels(p.Selectors, c, ns) // Build an api.PodList to operate against.
+	pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
 	Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
 	if len(pl.Items) == 0 || err != nil {
 		return pl.Items, err
 	}

 	unfilteredPods := pl.Items
-	filteredPods := []api.Pod{}
+	filteredPods := []v1.Pod{}
 ReturnPodsSoFar:
 	// Next: Pod must match at least one of the states that the user specified
 	for _, pod := range unfilteredPods {
@@ -943,8 +943,8 @@ ReturnPodsSoFar:

 // WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
 // definition.
-func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]api.Pod, error) {
-	pods := []api.Pod{}
+func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
+	pods := []v1.Pod{}
 	var returnedErr error

 	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
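A hypothetical WaitFor caller using the new []v1.Pod return; the threshold and timeout are invented, mirroring the "WaitFor(5)" advice in the ForEach comment below:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// atLeastFiveRunning blocks until five pods pass the filter or two minutes elapse.
func atLeastFiveRunning(f *framework.Framework) ([]v1.Pod, error) {
	cv := f.NewClusterVerification(framework.PodStateVerification{
		Selectors:   map[string]string{"app": "demo-pod"},
		ValidPhases: []v1.PodPhase{v1.PodRunning},
	})
	return cv.WaitFor(5, 2*time.Minute)
}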
@@ -983,7 +983,7 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
 //
 // For example, if you require at least 5 pods to be running before your test will pass,
 // its smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
-func (cl *ClusterVerification) ForEach(podFunc func(api.Pod)) error {
+func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
 	pods, err := cl.podState.filter(cl.client, cl.namespace)
 	if err == nil {
 		if len(pods) == 0 {
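And a hypothetical ForEach caller matching the new func(v1.Pod) parameter:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// logPodPhases visits every pod that passes the verification filter.
func logPodPhases(cv *framework.ClusterVerification) error {
	return cv.ForEach(func(p v1.Pod) {
		framework.Logf("pod %s is in phase %s", p.Name, p.Status.Phase)
	})
}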