Merge pull request #89223 from gavinfish/inte-framework-e2e

Remove the integration framework tests' dependency on the e2e framework
Kubernetes Prow Robot authored on 2020-03-20 14:02:23 -07:00; committed by GitHub
commit 332cab4f3b
3 changed files with 248 additions and 6 deletions

test/integration/framework/BUILD

@@ -38,14 +38,18 @@ go_library(
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/kubeapiserver:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/env:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
@@ -67,7 +71,6 @@ go_library(
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/component-base/version:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/google/uuid:go_default_library",

test/integration/framework/perf_utils.go

@@ -18,14 +18,13 @@ package framework
import (
"context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/klog"
testutils "k8s.io/kubernetes/test/utils"
)
const (
@@ -100,7 +99,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
}
}
- nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+ nodes, err := GetReadySchedulableNodes(p.client)
if err != nil {
klog.Fatalf("Error listing nodes: %v", err)
}
@@ -120,7 +119,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
// CleanupNodes deletes existing test nodes.
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
- nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+ nodes, err := GetReadySchedulableNodes(p.client)
if err != nil {
klog.Fatalf("Error listing nodes: %v", err)
}
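
The preparer pairs PrepareNodes with CleanupNodes. Here is a minimal sketch of how an integration benchmark might drive it after this change; the NewIntegrationTestNodePreparer constructor and the testutils.CountToStrategy / testutils.TrivialNodePrepareStrategy types are assumed from the surrounding package and test/utils, and are not shown in this diff:

func runWithTestNodes(client clientset.Interface, run func() error) error {
	preparer := NewIntegrationTestNodePreparer(
		client,
		[]testutils.CountToStrategy{{Count: 10, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
		"sample-node-",
	)
	if err := preparer.PrepareNodes(); err != nil {
		return err
	}
	// Best-effort cleanup once the workload finishes; the error is
	// intentionally ignored in this sketch.
	defer preparer.CleanupNodes()
	return run()
}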

test/integration/framework/util.go

@@ -19,11 +19,32 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"net/http/httptest"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// poll is how often to poll pods, nodes, and claims.
poll = 2 * time.Second
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
singleCallTimeout = 5 * time.Minute
)
// CreateTestingNamespace creates a namespace for testing.
@@ -43,3 +64,222 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T)
func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
}
// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// If there are no nodes that are both ready and schedulable, this will return an error.
func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
})
if len(nodes.Items) == 0 {
return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
}
return nodes, nil
}
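// Illustrative usage (a sketch, not part of this commit): an
// integration test that needs a workable node can call the helper
// directly and fail fast when none exists.
func requireReadyNodes(t *testing.T, c clientset.Interface) *v1.NodeList {
	nodes, err := GetReadySchedulableNodes(c)
	if err != nil {
		t.Fatalf("cluster has no ready, schedulable nodes: %v", err)
	}
	return nodes
}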
// checkWaitListSchedulableNodes wraps waitListSchedulableNodes and adds context to any error it returns.
func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for integration test cluster", err)
}
return nodes, nil
}
// waitListSchedulableNodes lists schedulable nodes, retrying transient API errors until the timeout.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return nodes, err
}
return nodes, nil
}
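// The wait.PollImmediate call above follows the standard condition-func
// contract: (false, nil) keeps polling, (false, err) aborts with err,
// and (true, nil) succeeds. A standalone sketch of the same retry shape
// (illustrative, not part of this commit):
func pollUntilDone(check func() error) error {
	return wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
		if err := check(); err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil // transient: try again on the next tick
			}
			return false, err // permanent: stop polling and surface the error
		}
		return true, nil // success
	})
}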
// Filter filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition.
func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
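// Example use of Filter (illustrative, not part of this commit): keep
// only Linux nodes, using the standard kubernetes.io/os node label.
func keepLinuxNodes(nodes *v1.NodeList) {
	Filter(nodes, func(node v1.Node) bool {
		return node.Labels["kubernetes.io/os"] == "linux"
	})
}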
// IsNodeSchedulable returns true if:
// 1) the node does not have its "unschedulable" field set, and
// 2) IsNodeReady returns true for it.
func IsNodeSchedulable(node *v1.Node) bool {
if node == nil {
return false
}
return !node.Spec.Unschedulable && IsNodeReady(node)
}
// IsNodeReady returns true if:
// 1) the node's Ready condition is set to true, and
// 2) it does not have a NetworkUnavailable condition set to true.
func IsNodeReady(node *v1.Node) bool {
nodeReady := IsConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := isConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return nodeReady && networkReady
}
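// A quick sketch (not part of this commit) of what the two predicates
// distinguish: a cordoned node can be Ready without being schedulable.
func predicateExample() {
	node := &v1.Node{
		Spec: v1.NodeSpec{Unschedulable: true},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
		},
	}
	_ = IsNodeReady(node)       // true: Ready is set and network is not marked unavailable
	_ = IsNodeSchedulable(node) // false: spec.unschedulable is true
}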
// IsConditionSetAsExpected returns true if the node has a condition of the given conditionType whose status matches wantTrue; otherwise it returns false and logs the mismatch in detail.
func IsConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
// IsConditionSetAsExpectedSilent is like IsConditionSetAsExpected but does not log mismatches.
func IsConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
// isConditionUnset returns true if the given node has no condition of the given conditionType, otherwise false.
func isConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Scan the node's conditions for the requested type, logging mismatches unless silent.
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
// For the NodeReady condition we also need to verify that the node
// controller's NotReady/Unreachable taints are gone.
if cond.Type == v1.NodeReady {
hasNodeControllerTaints := false
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
if wantTrue {
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
return true
}
msg := ""
if !hasNodeControllerTaints {
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
} else {
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
klog.Info(msg)
}
return false
}
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
klog.Infof("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
return true
}
if !silent {
klog.Infof("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
if !silent {
klog.Infof("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
// TODO: need to discuss whether to return bool and error type
func isNodeUntainted(node *v1.Node) bool {
nonblockingTaints := ""
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
nodeInfo := schedulernodeinfo.NewNodeInfo()
// Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}
for _, t := range strings.Split(nonblockingTaints, ",") {
if strings.TrimSpace(t) != "" {
nonblockingTaintsMap[strings.TrimSpace(t)] = struct{}{}
}
}
if len(nonblockingTaintsMap) > 0 {
nodeCopy := node.DeepCopy()
nodeCopy.Spec.Taints = []v1.Taint{}
for _, v := range node.Spec.Taints {
if _, isNonblockingTaint := nonblockingTaintsMap[v.Key]; !isNonblockingTaint {
nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
}
}
nodeInfo.SetNode(nodeCopy)
} else {
nodeInfo.SetNode(node)
}
taints, err := nodeInfo.Taints()
if err != nil {
klog.Fatalf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
})
}
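
For reference, isNodeUntainted builds a fake pod with no tolerations and asks the scheduler's node-info helper whether the node's NoSchedule/NoExecute taints would be tolerated. A short sketch of its behavior; the taint key below is an arbitrary example, not something this commit defines:

func taintedNodeExample() {
	node := &v1.Node{}
	node.Spec.Taints = []v1.Taint{
		{Key: "example.com/maintenance", Effect: v1.TaintEffectNoSchedule},
	}
	// The fake pod tolerates nothing, so any NoSchedule or NoExecute
	// taint marks the node as tainted.
	_ = isNodeUntainted(node) // false for the node above
}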