Merge pull request #34444 from gmarek/refactor-sched-pred

Automatic merge from submit-queue

Small refactoring of scheduler predicates

Ref #34336, #34441
This commit is contained in:
Kubernetes Submit Queue 2016-10-10 05:38:34 -07:00 committed by GitHub
commit 525958d295
6 changed files with 219 additions and 153 deletions

View File

@@ -20,7 +20,6 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
networking_util "k8s.io/kubernetes/test/utils"
) )
var _ = framework.KubeDescribe("Networking", func() { var _ = framework.KubeDescribe("Networking", func() {
@@ -32,30 +31,30 @@ var _ = framework.KubeDescribe("Networking", func() {
// expect exactly one unique hostname. Each of these endpoints reports // expect exactly one unique hostname. Each of these endpoints reports
// its own hostname. // its own hostname.
It("should function for intra-pod communication: http [Conformance]", func() { It("should function for intra-pod communication: http [Conformance]", func() {
config := networking_util.NewCoreNetworkingTestConfig(f) config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("http", endpointPod.Status.PodIP, networking_util.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
} }
}) })
It("should function for intra-pod communication: udp [Conformance]", func() { It("should function for intra-pod communication: udp [Conformance]", func() {
config := networking_util.NewCoreNetworkingTestConfig(f) config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, networking_util.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
} }
}) })
It("should function for node-pod communication: http [Conformance]", func() { It("should function for node-pod communication: http [Conformance]", func() {
config := networking_util.NewCoreNetworkingTestConfig(f) config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
config.DialFromNode("http", endpointPod.Status.PodIP, networking_util.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
} }
}) })
It("should function for node-pod communication: udp [Conformance]", func() { It("should function for node-pod communication: udp [Conformance]", func() {
config := networking_util.NewCoreNetworkingTestConfig(f) config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods { for _, endpointPod := range config.EndpointPods {
config.DialFromNode("udp", endpointPod.Status.PodIP, networking_util.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
} }
}) })
}) })

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package utils package framework
import ( import (
"encoding/json" "encoding/json"
@@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@@ -57,7 +56,7 @@ const (
) )
// NewNetworkingTestConfig creates and sets up a new test config helper. // NewNetworkingTestConfig creates and sets up a new test config helper.
func NewNetworkingTestConfig(f *framework.Framework) *NetworkingTestConfig { func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name} config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace)) By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setup(getServiceSelector()) config.setup(getServiceSelector())
@@ -65,7 +64,7 @@ func NewNetworkingTestConfig(f *framework.Framework) *NetworkingTestConfig {
} }
// NewNetworkingTestNodeE2EConfig creates and sets up a new test config helper for Node E2E. // NewNetworkingTestNodeE2EConfig creates and sets up a new test config helper for Node E2E.
func NewCoreNetworkingTestConfig(f *framework.Framework) *NetworkingTestConfig { func NewCoreNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name} config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace)) By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setupCore(getServiceSelector()) config.setupCore(getServiceSelector())
@@ -94,8 +93,8 @@ type NetworkingTestConfig struct {
// test config. Each invocation of `setup` creates a service with // test config. Each invocation of `setup` creates a service with
// 1 pod per node running the netexecImage. // 1 pod per node running the netexecImage.
EndpointPods []*api.Pod EndpointPods []*api.Pod
f *framework.Framework f *Framework
podClient *framework.PodClient podClient *PodClient
// NodePortService is a Service with Type=NodePort spanning over all // NodePortService is a Service with Type=NodePort spanning over all
// endpointPods. // endpointPods.
NodePortService *api.Service NodePortService *api.Service
@@ -134,10 +133,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
if foundEndpoints.Has(e.Name) { if foundEndpoints.Has(e.Name) {
continue continue
} }
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name) Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := framework.RunKubectl( desc, _ := RunKubectl(
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace)) "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
framework.Logf(desc) Logf(desc)
} }
} }
@@ -179,11 +178,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
// A failure to kubectl exec counts as a try, not a hard fail. // A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where // Also note that we will keep failing for maxTries in tests where
// we confirm unreachability. // we confirm unreachability.
framework.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr) Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
} else { } else {
var output map[string][]string var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil { if err := json.Unmarshal([]byte(stdout), &output); err != nil {
framework.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v", Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err) cmd, config.HostTestContainerPod.Name, stdout, err)
continue continue
} }
@@ -195,7 +194,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
} }
} }
} }
framework.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps)) Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
// Check against i+1 so we exit if minTries == maxTries. // Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries { if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@@ -204,7 +203,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
} }
config.diagnoseMissingEndpoints(eps) config.diagnoseMissingEndpoints(eps)
framework.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps) Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
} }
// DialFromNode executes a tcp or udp request based on protocol via kubectl exec // DialFromNode executes a tcp or udp request based on protocol via kubectl exec
@@ -237,14 +236,14 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// A failure to exec command counts as a try, not a hard fail. // A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where // Also note that we will keep failing for maxTries in tests where
// we confirm unreachability. // we confirm unreachability.
framework.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr) Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
} else { } else {
trimmed := strings.TrimSpace(stdout) trimmed := strings.TrimSpace(stdout)
if trimmed != "" { if trimmed != "" {
eps.Insert(trimmed) eps.Insert(trimmed)
} }
} }
framework.Logf("Waiting for %+v endpoints, got endpoints %+v", expectedEps.Difference(eps), eps) Logf("Waiting for %+v endpoints, got endpoints %+v", expectedEps.Difference(eps), eps)
// Check against i+1 so we exit if minTries == maxTries. // Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries { if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@@ -253,7 +252,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
} }
config.diagnoseMissingEndpoints(eps) config.diagnoseMissingEndpoints(eps)
framework.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps) Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
} }
// GetSelfURL executes a curl against the given path via kubectl exec into a // GetSelfURL executes a curl against the given path via kubectl exec into a
@@ -262,7 +261,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) { func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
cmd := fmt.Sprintf("curl -q -s --connect-timeout 1 http://localhost:10249%s", path) cmd := fmt.Sprintf("curl -q -s --connect-timeout 1 http://localhost:10249%s", path)
By(fmt.Sprintf("Getting kube-proxy self URL %s", path)) By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
stdout := framework.RunHostCmdOrDie(config.Namespace, config.HostTestContainerPod.Name, cmd) stdout := RunHostCmdOrDie(config.Namespace, config.HostTestContainerPod.Name, cmd)
Expect(strings.Contains(stdout, expected)).To(BeTrue()) Expect(strings.Contains(stdout, expected)).To(BeTrue())
} }
@@ -380,23 +379,23 @@ func (config *NetworkingTestConfig) DeleteNodePortService() {
func (config *NetworkingTestConfig) createTestPods() { func (config *NetworkingTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec() testContainerPod := config.createTestPodSpec()
hostTestContainerPod := framework.NewHostExecPodSpec(config.Namespace, hostTestPodName) hostTestContainerPod := NewHostExecPodSpec(config.Namespace, hostTestPodName)
config.createPod(testContainerPod) config.createPod(testContainerPod)
config.createPod(hostTestContainerPod) config.createPod(hostTestContainerPod)
framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
var err error var err error
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name) config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name)
if err != nil { if err != nil {
framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
} }
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name) config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name)
if err != nil { if err != nil {
framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
} }
} }
@@ -404,7 +403,7 @@ func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api
_, err := config.getServiceClient().Create(serviceSpec) _, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = framework.WaitForService(config.f.Client, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) err = WaitForService(config.f.Client, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name) createdService, err := config.getServiceClient().Get(serviceSpec.Name)
@@ -432,12 +431,12 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector) config.setupCore(selector)
By("Getting node addresses") By("Getting node addresses")
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client)) ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client) nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP) config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.ExternalAddrs) < 2 { if len(config.ExternalAddrs) < 2 {
// fall back to legacy IPs // fall back to legacy IPs
config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP) config.ExternalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
} }
Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP")) Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
config.Nodes = nodeList.Items config.Nodes = nodeList.Items
@@ -483,8 +482,8 @@ func shuffleNodes(nodes []api.Node) []api.Node {
} }
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client)) ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client) nodeList := GetReadySchedulableNodesOrDie(config.f.Client)
// To make this test work reasonably fast in large clusters, // To make this test work reasonably fast in large clusters,
// we limit the number of NetProxyPods to no more than 100 ones // we limit the number of NetProxyPods to no more than 100 ones
@@ -507,9 +506,9 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
// wait that all of them are up // wait that all of them are up
runningPods := make([]*api.Pod, 0, len(nodes)) runningPods := make([]*api.Pod, 0, len(nodes))
for _, p := range createdPods { for _, p := range createdPods {
framework.ExpectNoError(config.f.WaitForPodReady(p.Name)) ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name) rp, err := config.getPodClient().Get(p.Name)
framework.ExpectNoError(err) ExpectNoError(err)
runningPods = append(runningPods, rp) runningPods = append(runningPods, rp)
} }
@@ -521,14 +520,14 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:] config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted. // wait for pod being deleted.
err := framework.WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) err := WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil { if err != nil {
framework.Failf("Failed to delete %s pod: %v", pod.Name, err) Failf("Failed to delete %s pod: %v", pod.Name, err)
} }
// wait for endpoint being removed. // wait for endpoint being removed.
err = framework.WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout) err = WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
if err != nil { if err != nil {
framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName) Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
} }
// wait for kube-proxy to catch up with the pod being deleted. // wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
@@ -538,7 +537,7 @@ func (config *NetworkingTestConfig) createPod(pod *api.Pod) *api.Pod {
return config.getPodClient().Create(pod) return config.getPodClient().Create(pod)
} }
func (config *NetworkingTestConfig) getPodClient() *framework.PodClient { func (config *NetworkingTestConfig) getPodClient() *PodClient {
if config.podClient == nil { if config.podClient == nil {
config.podClient = config.f.PodClient() config.podClient = config.f.PodClient()
} }

View File

@@ -79,6 +79,7 @@ import (
utilyaml "k8s.io/kubernetes/pkg/util/yaml" utilyaml "k8s.io/kubernetes/pkg/util/yaml"
"k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
testutil "k8s.io/kubernetes/test/utils"
"github.com/blang/semver" "github.com/blang/semver"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
@@ -3022,64 +3023,25 @@ func WaitForAllNodesSchedulable(c *client.Client) error {
}) })
} }
func AddOrUpdateLabelOnNode(c *client.Client, nodeName string, labelKey string, labelValue string) { func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, labelKey, labelValue) ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
var err error
for attempt := 0; attempt < UpdateRetries; attempt++ {
err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add a label %v:%v to %v", labelKey, labelValue, nodeName)
}
} else {
break
}
time.Sleep(100 * time.Millisecond)
}
ExpectNoError(err)
} }
func ExpectNodeHasLabel(c *client.Client, nodeName string, labelKey string, labelValue string) { func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
By("verifying the node has the label " + labelKey + " " + labelValue) By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.Nodes().Get(nodeName) node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err) ExpectNoError(err)
Expect(node.Labels[labelKey]).To(Equal(labelValue)) Expect(node.Labels[labelKey]).To(Equal(labelValue))
} }
// RemoveLabelOffNode is for cleaning up labels temporarily added to node, // RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed. // won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c *client.Client, nodeName string, labelKey string) { func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
By("removing the label " + labelKey + " off the node " + nodeName) By("removing the label " + labelKey + " off the node " + nodeName)
var nodeUpdated *api.Node ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
var node *api.Node
var err error
for attempt := 0; attempt < UpdateRetries; attempt++ {
node, err = c.Nodes().Get(nodeName)
ExpectNoError(err)
if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
return
}
delete(node.Labels, labelKey)
nodeUpdated, err = c.Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to remove a label %v from %v", labelKey, nodeName)
}
} else {
break
}
time.Sleep(100 * time.Millisecond)
}
ExpectNoError(err)
By("verifying the node doesn't have the label " + labelKey) By("verifying the node doesn't have the label " + labelKey)
if nodeUpdated.Labels != nil && len(nodeUpdated.Labels[labelKey]) != 0 { ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
Failf("Failed removing label " + labelKey + " of the node " + nodeName)
}
} }
func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) { func AddOrUpdateTaintOnNode(c *client.Client, nodeName string, taint api.Taint) {

View File

@@ -23,7 +23,6 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
networking_util "k8s.io/kubernetes/test/utils"
) )
var _ = framework.KubeDescribe("Networking", func() { var _ = framework.KubeDescribe("Networking", func() {
@@ -77,7 +76,7 @@ var _ = framework.KubeDescribe("Networking", func() {
It("should check kube-proxy urls", func() { It("should check kube-proxy urls", func() {
// TODO: this is overkill we just need the host networking pod // TODO: this is overkill we just need the host networking pod
// to hit kube-proxy urls. // to hit kube-proxy urls.
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By("checking kube-proxy URLs") By("checking kube-proxy URLs")
config.GetSelfURL("/healthz", "ok") config.GetSelfURL("/healthz", "ok")
@@ -88,84 +87,84 @@ var _ = framework.KubeDescribe("Networking", func() {
framework.KubeDescribe("Granular Checks: Services [Slow]", func() { framework.KubeDescribe("Granular Checks: Services [Slow]", func() {
It("should function for pod-Service: http", func() { It("should function for pod-Service: http", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterHttpPort)) By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, networking_util.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeHttpPort)) By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeHttpPort))
config.DialFromTestContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for pod-Service: udp", func() { It("should function for pod-Service: udp", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterUdpPort)) By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, networking_util.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeUdpPort)) By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.ExternalAddrs[0], config.NodeUdpPort))
config.DialFromTestContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for node-Service: http", func() { It("should function for node-Service: http", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, networking_util.ClusterHttpPort)) By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromNode("http", config.ClusterIP, networking_util.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort)) By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for node-Service: udp", func() { It("should function for node-Service: udp", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, networking_util.ClusterUdpPort)) By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromNode("udp", config.ClusterIP, networking_util.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort)) By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for endpoint-Service: http", func() { It("should function for endpoint-Service: http", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, networking_util.ClusterHttpPort)) By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromEndpointContainer("http", config.ClusterIP, networking_util.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHttpPort)) By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHttpPort))
config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for endpoint-Service: udp", func() { It("should function for endpoint-Service: udp", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, networking_util.ClusterUdpPort)) By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromEndpointContainer("udp", config.ClusterIP, networking_util.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUdpPort)) By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUdpPort))
config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should update endpoints: http", func() { It("should update endpoints: http", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterHttpPort)) By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, networking_util.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod() config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterHttpPort)) By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, networking_util.ClusterHttpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
}) })
It("should update endpoints: udp", func() { It("should update endpoints: udp", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterUdpPort)) By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, networking_util.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod() config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, networking_util.ClusterUdpPort)) By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, networking_util.ClusterUdpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
}) })
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: http [Slow]", func() { It("should update nodePort: http [Slow]", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort)) By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
@ -177,7 +176,7 @@ var _ = framework.KubeDescribe("Networking", func() {
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: udp [Slow]", func() { It("should update nodePort: udp [Slow]", func() {
config := networking_util.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort)) By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())

View File

@ -23,6 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -48,6 +49,7 @@ type pausePodConfig struct {
var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
var c *client.Client var c *client.Client
var cs clientset.Interface
var nodeList *api.NodeList var nodeList *api.NodeList
var systemPodsNo int var systemPodsNo int
var totalPodCapacity int64 var totalPodCapacity int64
@ -67,6 +69,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
BeforeEach(func() { BeforeEach(func() {
c = f.Client c = f.Client
cs = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
nodeList = &api.NodeList{} nodeList = &api.NodeList{}
@ -262,9 +265,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42" v := "42"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.") By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels" labelPodName := "with-labels"
@ -333,9 +336,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
v := "42" v := "42"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to relaunch the pod, now with labels.") By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels" labelPodName := "with-labels"
@ -378,9 +381,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a label with fake az info on the found node.") By("Trying to apply a label with fake az info on the found node.")
k := "kubernetes.io/e2e-az-name" k := "kubernetes.io/e2e-az-name"
v := "e2e-az1" v := "e2e-az1"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.") By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.")
pod := createPodWithNodeAffinity(f) pod := createPodWithNodeAffinity(f)
@ -466,9 +469,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone" k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "china-e2etest" v := "china-e2etest"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to launch the pod, now with podAffinity.") By("Trying to launch the pod, now with podAffinity.")
labelPodName := "with-podaffinity-" + string(uuid.NewUUID()) labelPodName := "with-podaffinity-" + string(uuid.NewUUID())
@ -521,9 +524,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
k := "e2e.inter-pod-affinity.kubernetes.io/zone" k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "china-e2etest" v := "china-e2etest"
for _, nodeName := range nodeNames { for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
} }
By("Trying to launch another pod on the first node with the service label.") By("Trying to launch another pod on the first node with the service label.")
@ -569,9 +572,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone" k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "kubernetes-e2e" v := "kubernetes-e2e"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.") By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.")
labelPodName := "with-podaffinity-" + string(uuid.NewUUID()) labelPodName := "with-podaffinity-" + string(uuid.NewUUID())
@ -620,9 +623,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone" k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "e2e-testing" v := "e2e-testing"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to launch the pod, now with Pod affinity and anti affinity.") By("Trying to launch the pod, now with Pod affinity and anti affinity.")
pod := createPodWithPodAffinity(f, k) pod := createPodWithPodAffinity(f, k)
@ -645,9 +648,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a label with fake az info on the found node.") By("Trying to apply a label with fake az info on the found node.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone" k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "e2e-az1" v := "e2e-az1"
framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(c, nodeName, k, v) framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(c, nodeName, k) defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.") By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.")
pod := createPodWithPodAffinity(f, "kubernetes.io/hostname") pod := createPodWithPodAffinity(f, "kubernetes.io/hostname")
@ -682,9 +685,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value" labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(c, nodeName, labelKey, labelValue) framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(c, nodeName, labelKey, labelValue) framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(c, nodeName, labelKey) defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, now with tolerations.") By("Trying to relaunch the pod, now with tolerations.")
tolerationPodName := "with-tolerations" tolerationPodName := "with-tolerations"
@ -734,9 +737,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Trying to apply a random label on the found node.") By("Trying to apply a random label on the found node.")
labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
labelValue := "testing-label-value" labelValue := "testing-label-value"
framework.AddOrUpdateLabelOnNode(c, nodeName, labelKey, labelValue) framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
framework.ExpectNodeHasLabel(c, nodeName, labelKey, labelValue) framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
defer framework.RemoveLabelOffNode(c, nodeName, labelKey) defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
By("Trying to relaunch the pod, still no tolerations.") By("Trying to relaunch the pod, still no tolerations.")
podNameNoTolerations := "still-no-tolerations" podNameNoTolerations := "still-no-tolerations"

104
test/utils/density_utils.go Normal file
View File

@ -0,0 +1,104 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

	"github.com/golang/glog"
)
// retries bounds how many times the node-mutating helpers in this file
// re-attempt an update after an apiserver conflict before giving up.
const (
	retries = 5
)
// AddLabelsToNode merges the given labels into the named node's metadata via
// a JSON merge patch. Conflicting updates are retried up to `retries` times
// with a short backoff; any other error aborts immediately. It returns the
// last error seen, or nil on success.
func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]string) error {
	tokens := make([]string, 0, len(labels))
	for k, v := range labels {
		// Marshal each key and value so quotes, backslashes and control
		// characters are escaped into valid JSON; the previous naive
		// "\""+k+"\"" quoting produced a malformed patch for such inputs.
		// json.Marshal of a plain string cannot fail.
		kb, _ := json.Marshal(k)
		vb, _ := json.Marshal(v)
		tokens = append(tokens, string(kb)+":"+string(vb))
	}
	labelString := "{" + strings.Join(tokens, ",") + "}"
	patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		_, err = c.Core().Nodes().Patch(nodeName, api.MergePatchType, []byte(patch))
		if err == nil {
			return nil
		}
		if !apierrs.IsConflict(err) {
			return err
		}
		// Conflict with a concurrent update: back off briefly and retry.
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed. Conflicting
// updates are retried up to `retries` times with a short backoff; any other
// error aborts immediately.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
	var node *api.Node
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err = c.Core().Nodes().Get(nodeName)
		if err != nil {
			return err
		}
		if node.Labels == nil {
			return nil
		}
		for _, labelKey := range labelKeys {
			// Skip keys that are already absent but keep going: the
			// original `break` here stopped at the first missing key and
			// left any later labels on the node.
			if len(node.Labels[labelKey]) == 0 {
				continue
			}
			delete(node.Labels, labelKey)
		}
		_, err = c.Core().Nodes().Update(node)
		if err == nil {
			return nil
		}
		if !apierrs.IsConflict(err) {
			return err
		}
		glog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName)
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
// VerifyLabelsRemoved checks if Node for given nodeName does not have any of
// labels from labelKeys. Return non-nil error if it does, or if the node
// cannot be fetched.
func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error {
	node, err := c.Core().Nodes().Get(nodeName)
	if err != nil {
		return err
	}
	for _, labelKey := range labelKeys {
		if node.Labels != nil && len(node.Labels[labelKey]) != 0 {
			// Pass the values as verbs instead of concatenating them into
			// the format string: a '%' inside a label key or node name
			// would otherwise be misinterpreted by Errorf.
			return fmt.Errorf("failed removing label %q of the node %q", labelKey, nodeName)
		}
	}
	return nil
}