Merge pull request #35129 from gmarek/generalize

Automatic merge from submit-queue

Generalize Node preparation for e2e and integration tests

@wojtek-t
Authored by Kubernetes Submit Queue on 2016-10-20 14:34:03 -07:00, committed by GitHub
commit 7a03564bb6
6 changed files with 254 additions and 30 deletions

@@ -4514,3 +4514,63 @@ func ListNamespaceEvents(c *client.Client, ns string) error {
	}
	return nil
}

// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
	client clientset.Interface
	// Specifies how many nodes should be modified using the given strategy.
	// Only one strategy can be applied to a single Node, so there needs to
	// be at least <sum_of_keys> Nodes in the cluster.
	countToStrategy       map[int]testutils.PrepareNodeStrategy
	nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
}

func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy) testutils.TestNodePreparer {
	return &E2ETestNodePreparer{
		client:                client,
		countToStrategy:       countToStrategy,
		nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
	}
}

func (p *E2ETestNodePreparer) PrepareNodes() error {
	nodes := GetReadySchedulableNodesOrDie(p.client)
	numTemplates := 0
	for k := range p.countToStrategy {
		numTemplates += k
	}
	if numTemplates > len(nodes.Items) {
		return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
	}
	index := 0
	sum := 0
	for k, strategy := range p.countToStrategy {
		sum += k
		for ; index < sum; index++ {
			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
				glog.Errorf("Aborting node preparation: %v", err)
				return err
			}
			p.nodeToAppliedStrategy[nodes.Items[index].Name] = strategy
		}
	}
	return nil
}

func (p *E2ETestNodePreparer) CleanupNodes() error {
	var encounteredError error
	nodes := GetReadySchedulableNodesOrDie(p.client)
	for i := range nodes.Items {
		var err error
		name := nodes.Items[i].Name
		strategy, found := p.nodeToAppliedStrategy[name]
		if found {
			if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
				glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
				encounteredError = err
			}
		}
	}
	return encounteredError
}
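
For reference, wiring the preparer into a test takes two calls plus a deferred cleanup. A minimal sketch, assuming c is an initialized clientset.Interface (TrivialNodePrepareStrategy, defined further down in this PR, is the only strategy shipped with it):

	// Sketch, not part of this PR: prepare 3 ready nodes with the trivial
	// strategy and restore them when the test finishes.
	preparer := framework.NewE2ETestNodePreparer(
		c,
		map[int]testutils.PrepareNodeStrategy{3: &testutils.TrivialNodePrepareStrategy{}},
	)
	if err := preparer.PrepareNodes(); err != nil {
		framework.Failf("Preparing nodes failed: %v", err)
	}
	defer preparer.CleanupNodes()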

@@ -0,0 +1,103 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	e2eframework "k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

const (
	retries = 5
)

type IntegrationTestNodePreparer struct {
	client          clientset.Interface
	countToStrategy map[int]testutils.PrepareNodeStrategy
	nodeNamePrefix  string
}

func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
	return &IntegrationTestNodePreparer{
		client:          client,
		countToStrategy: countToStrategy,
		nodeNamePrefix:  nodeNamePrefix,
	}
}

func (p *IntegrationTestNodePreparer) PrepareNodes() error {
	numNodes := 0
	for k := range p.countToStrategy {
		numNodes += k
	}

	glog.Infof("Making %d nodes", numNodes)
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: p.nodeNamePrefix,
		},
		Spec: api.NodeSpec{
			// TODO: investigate why this is needed.
			ExternalID: "foo",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < numNodes; i++ {
		if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
			glog.Fatalf("Error creating node: %v", err)
		}
	}

	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
	index := 0
	sum := 0
	for k, strategy := range p.countToStrategy {
		sum += k
		for ; index < sum; index++ {
			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
				glog.Errorf("Aborting node preparation: %v", err)
				return err
			}
		}
	}
	return nil
}

func (p *IntegrationTestNodePreparer) CleanupNodes() error {
	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
	for i := range nodes.Items {
		if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
			glog.Errorf("Error while deleting Node: %v", err)
		}
	}
	return nil
}
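
One note on the countToStrategy shape: because the node count is the map key, a single preparer can split its nodes across several strategies, but two strategies can never use the same count (the keys would collide). A minimal sketch of a mixed setup, assuming c is an initialized clientset.Interface and labelStrategy is a hypothetical PrepareNodeStrategy implementation:

	// Sketch, not from this PR: 5 nodes get a hypothetical labeling strategy
	// and 3 nodes the trivial one, so at least 8 nodes end up being created.
	nodePreparer := framework.NewIntegrationTestNodePreparer(
		c,
		map[int]testutils.PrepareNodeStrategy{
			5: &labelStrategy{}, // hypothetical; only TrivialNodePrepareStrategy exists here
			3: &testutils.TrivialNodePrepareStrategy{},
		},
		"mixed-test-node-",
	)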

@@ -19,6 +19,11 @@ package benchmark
import (
	"testing"
	"time"

	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
@@ -53,8 +58,17 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
	defer finalFunc()
	c := schedulerConfigFactory.Client

	makeNodes(c, numNodes)
	nodePreparer := framework.NewIntegrationTestNodePreparer(
		c,
		map[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},
		"scheduler-perf-",
	)
	if err := nodePreparer.PrepareNodes(); err != nil {
		glog.Fatalf("%v", err)
	}
	defer nodePreparer.CleanupNodes()

	makePodsFromRC(c, "rc1", numScheduledPods)
	for {
		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
		if len(scheduled) >= numScheduledPods {

@@ -21,6 +21,11 @@ import (
	"math"
	"testing"
	"time"

	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)

const (
@@ -76,7 +81,15 @@ func schedulePods(numNodes, numPods int) int32 {
	defer destroyFunc()
	c := schedulerConfigFactory.Client

	makeNodes(c, numNodes)
	nodePreparer := framework.NewIntegrationTestNodePreparer(
		c,
		map[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},
		"scheduler-perf-",
	)
	if err := nodePreparer.PrepareNodes(); err != nil {
		glog.Fatalf("%v", err)
	}
	defer nodePreparer.CleanupNodes()

	makePodsFromRC(c, "rc1", numPods)

	prev := 0

@@ -80,34 +80,6 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro
	return
}

func makeNodes(c clientset.Interface, nodeCount int) {
	glog.Infof("making %d nodes", nodeCount)
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < nodeCount; i++ {
		if _, err := c.Core().Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}

func makePodSpec() api.PodSpec {
	return api.PodSpec{
		Containers: []api.Container{{

@@ -23,9 +23,11 @@ import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
@@ -605,3 +607,63 @@ waitLoop:
	}
	return nil
}

type TestNodePreparer interface {
	PrepareNodes() error
	CleanupNodes() error
}

type PrepareNodeStrategy interface {
	PreparePatch(node *api.Node) []byte
	CleanupNode(node *api.Node) *api.Node
}

type TrivialNodePrepareStrategy struct{}

func (*TrivialNodePrepareStrategy) PreparePatch(*api.Node) []byte {
	return []byte{}
}

func (*TrivialNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
	nodeCopy := *node
	return &nodeCopy
}
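
// Not part of this PR: a hedged sketch of what a non-trivial strategy could
// look like. It hands DoPrepareNode (below) a JSON merge patch that adds one
// label, and strips that label again in CleanupNode. The type name and fields
// are illustrative assumptions.
type labelNodePrepareStrategy struct {
	labelKey   string
	labelValue string
}

func (s *labelNodePrepareStrategy) PreparePatch(*api.Node) []byte {
	// %q quotes the strings, which is JSON-compatible for typical label values.
	return []byte(fmt.Sprintf(`{"metadata":{"labels":{%q:%q}}}`, s.labelKey, s.labelValue))
}

func (s *labelNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
	nodeCopy := *node
	if _, found := node.Labels[s.labelKey]; found {
		// Rebuild the label map without the key we added in PreparePatch.
		nodeCopy.Labels = make(map[string]string)
		for k, v := range node.Labels {
			if k != s.labelKey {
				nodeCopy.Labels[k] = v
			}
		}
	}
	return &nodeCopy
}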
func DoPrepareNode(client clientset.Interface, node *api.Node, strategy PrepareNodeStrategy) error {
	var err error
	patch := strategy.PreparePatch(node)
	if len(patch) == 0 {
		return nil
	}
	for attempt := 0; attempt < retries; attempt++ {
		if _, err = client.Core().Nodes().Patch(node.Name, api.MergePatchType, []byte(patch)); err == nil {
			return nil
		}
		if !apierrs.IsConflict(err) {
			return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("Too many conflicts when applying patch %v to Node %v", string(patch), node.Name)
}

func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
	for attempt := 0; attempt < retries; attempt++ {
		node, err := client.Core().Nodes().Get(nodeName)
		if err != nil {
			return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
		}
		updatedNode := strategy.CleanupNode(node)
		if api.Semantic.DeepEqual(node, updatedNode) {
			return nil
		}
		if _, err = client.Core().Nodes().Update(updatedNode); err == nil {
			return nil
		}
		if !apierrs.IsConflict(err) {
			return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("Too many conflicts when trying to cleanup Node %v", nodeName)
}
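
Both helpers retry up to retries times with a 100 ms pause between attempts, bailing out early on any non-conflict error. Called directly against a single node, the flow looks roughly like this (a sketch; c and node are assumed to be an initialized clientset.Interface and an existing *api.Node):

	strategy := &testutils.TrivialNodePrepareStrategy{}
	if err := testutils.DoPrepareNode(c, node, strategy); err != nil {
		glog.Errorf("Prepare failed: %v", err)
	}
	// ... exercise the cluster ...
	if err := testutils.DoCleanupNode(c, node.Name, strategy); err != nil {
		glog.Errorf("Cleanup failed: %v", err)
	}

Note that the trivial strategy short-circuits both paths: its empty patch makes DoPrepareNode return immediately, and its copy-only CleanupNode makes the Semantic.DeepEqual check succeed on the first attempt.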