Add test for image in node status

This commit is contained in:
Harry Zhang 2016-05-11 07:59:54 -04:00
parent 032a886320
commit d917ed2638
4 changed files with 125 additions and 27 deletions

View File

@ -3100,9 +3100,9 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
glog.Errorf("Error getting image list: %v", err)
} else {
// sort the images from max to min, and only set top N images into the node status.
sort.Sort(ByImageSize(containerImages))
sort.Sort(byImageSize(containerImages))
if maxImagesInNodeStatus < len(containerImages) {
containerImages = containerImages[0 : maxImagesInNodeStatus-1]
containerImages = containerImages[0:maxImagesInNodeStatus]
}
for _, image := range containerImages {
@ -3121,14 +3121,14 @@ func (kl *Kubelet) setNodeStatusGoRuntime(node *api.Node) {
node.Status.NodeInfo.Architecture = goRuntime.GOARCH
}
type ByImageSize []kubecontainer.Image
type byImageSize []kubecontainer.Image
// Sort from max to min
func (a ByImageSize) Less(i, j int) bool {
func (a byImageSize) Less(i, j int) bool {
return a[i].Size > a[j].Size
}
func (a ByImageSize) Len() int { return len(a) }
func (a ByImageSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byImageSize) Len() int { return len(a) }
func (a byImageSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Set status for the node.
func (kl *Kubelet) setNodeStatusInfo(node *api.Node) {

View File

@ -26,6 +26,7 @@ import (
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
@ -67,6 +68,7 @@ import (
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/rand"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@ -80,10 +82,19 @@ func init() {
utilruntime.ReallyCrash = true
}
const testKubeletHostname = "127.0.0.1"
const (
testKubeletHostname = "127.0.0.1"
const testReservationCPU = "200m"
const testReservationMemory = "100M"
testReservationCPU = "200m"
testReservationMemory = "100M"
maxImageTagsForTest = 3
// TODO(harry): is there a global place for these two?
// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
minImgSize int64 = 23 * 1024 * 1024
maxImgSize int64 = 1000 * 1024 * 1024
)
type fakeHTTP struct {
url string
@ -105,11 +116,9 @@ type TestKubelet struct {
mounter mount.Interface
}
// newTestKubelet returns a test kubelet with two images.
func newTestKubelet(t *testing.T) *TestKubelet {
fakeRuntime := &containertest.FakeRuntime{}
fakeRuntime.RuntimeType = "test"
fakeRuntime.VersionInfo = "1.5.0"
fakeRuntime.ImageList = []kubecontainer.Image{
imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
@ -121,6 +130,53 @@ func newTestKubelet(t *testing.T) *TestKubelet {
Size: 456,
},
}
return newTestKubeletWithImageList(t, imageList)
}
// generateTestingImageList returns a randomly generated image list of the
// given length, together with the api.ContainerImage list the node status is
// expected to report for it: the images sorted from largest to smallest,
// capped at maxImagesInNodeStatus entries.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
	// Build count random images.
	var imageList []kubecontainer.Image
	for i := 0; i < count; i++ {
		imageList = append(imageList, kubecontainer.Image{
			ID:       string(util.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		})
	}

	// The node status sorts images by size, largest first, so the expected
	// list is derived from the sorted input.
	sort.Sort(byImageSize(imageList))

	var expectedImageList []api.ContainerImage
	for _, image := range imageList {
		expectedImageList = append(expectedImageList, api.ContainerImage{
			Names:     image.RepoTags,
			SizeBytes: image.Size,
		})
	}

	// Only the top maxImagesInNodeStatus images make it into the node status.
	return imageList, expectedImageList[:maxImagesInNodeStatus]
}
// generateImageTags returns between 1 and maxImageTagsForTest fake repo tags
// (in descending version order, e.g. v3, v2, v1).
func generateImageTags() []string {
	count := rand.IntnRange(1, maxImageTagsForTest+1)
	tagList := make([]string, 0, count)
	for i := count; i > 0; i-- {
		tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(i))
	}
	return tagList
}
func newTestKubeletWithImageList(t *testing.T, imageList []kubecontainer.Image) *TestKubelet {
fakeRuntime := &containertest.FakeRuntime{}
fakeRuntime.RuntimeType = "test"
fakeRuntime.VersionInfo = "1.5.0"
fakeRuntime.ImageList = imageList
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &fake.Clientset{}
kubelet := &Kubelet{}
@ -2349,7 +2405,9 @@ func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, ro
}
func TestUpdateNewNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
// generate one more than maxImagesInNodeStatus in inputImageList
inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
testKubelet := newTestKubeletWithImageList(t, inputImageList)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
@ -2434,16 +2492,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
Images: []api.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
},
},
Images: expectedImageList,
},
}
@ -2478,9 +2527,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
} else {
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
}
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {

View File

@ -32,7 +32,7 @@ var rng = struct {
rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
}
// Intn generates an integer in range 0->max.
// Intn generates an integer in range [0,max).
// By design this should panic if input is invalid, <= 0.
func Intn(max int) int {
rng.Lock()
@ -40,6 +40,22 @@ func Intn(max int) int {
return rng.rand.Intn(max)
}
// IntnRange returns a random integer in the half-open interval [min,max).
// By design this panics (via math/rand) when max-min <= 0.
func IntnRange(min, max int) int {
	rng.Lock()
	defer rng.Unlock()
	return min + rng.rand.Intn(max-min)
}
// Int63nRange returns a random int64 in the half-open interval [min,max).
// By design this panics (via math/rand) if the input is invalid, i.e. max-min <= 0.
//
// Fix: the doc comment previously started with "IntnRange", the name of the
// sibling function — Go doc comments must begin with the declared name.
func Int63nRange(min, max int64) int64 {
	rng.Lock()
	defer rng.Unlock()
	return rng.rand.Int63n(max-min) + min
}
// Seed seeds the rng with the provided seed.
func Seed(seed int64) {
rng.Lock()

View File

@ -24,6 +24,10 @@ import (
"testing"
)
const (
maxRangeTestCount = 500
)
func TestString(t *testing.T) {
valid := "0123456789abcdefghijklmnopqrstuvwxyz"
for _, l := range []int{0, 1, 2, 10, 123} {
@ -84,3 +88,27 @@ func TestShuffle(t *testing.T) {
t.Errorf("Shuffle(%v) => %v, want %v", have, got, want)
}
}
// TestIntnRange verifies that IntnRange only yields values inside [min,max)
// across several ranges. A span of 0 would be invalid input.
func TestIntnRange(t *testing.T) {
	ranges := map[int]int{1: 2, 10: 123, 100: 500}
	for min, max := range ranges {
		for trial := 0; trial < maxRangeTestCount; trial++ {
			inrange := IntnRange(min, max)
			if inrange < min || inrange >= max {
				t.Errorf("%v out of range (%v,%v)", inrange, min, max)
			}
		}
	}
}
// TestInt63nRange verifies that Int63nRange only yields values inside
// [min,max) across several ranges. A span of 0 would be invalid input.
func TestInt63nRange(t *testing.T) {
	ranges := map[int64]int64{1: 2, 10: 123, 100: 500}
	for min, max := range ranges {
		for trial := 0; trial < maxRangeTestCount; trial++ {
			inrange := Int63nRange(min, max)
			if inrange < min || inrange >= max {
				t.Errorf("%v out of range (%v,%v)", inrange, min, max)
			}
		}
	}
}