Use log functions of the core framework in test/e2e/scheduling

author s-ito-ts 2019-08-27 02:57:35 +00:00
parent 5183669719
commit 8745f02015
10 changed files with 104 additions and 114 deletions
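
The change is mechanical across all ten files: the import line e2elog "k8s.io/kubernetes/test/e2e/framework/log" is dropped, and every e2elog.Logf / e2elog.Failf call switches to the equivalent Logf / Failf helper that the core framework package already exposes. A minimal Go sketch of the resulting pattern follows; the package clause and the logNodeImage helper are illustrative only, not taken from the diff:

    package scheduling

    import (
        // Before this commit the tests also imported the wrapper package:
        //   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
        // and logged through e2elog.Logf / e2elog.Failf.
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // logNodeImage illustrates the replacement pattern: Logf and Failf are
    // called directly on the framework package, so the extra log import
    // is no longer needed.
    func logNodeImage(nodeName, osImage string) {
        framework.Logf("Nodename: %v, OS Image: %v", nodeName, osImage)
        if osImage == "" {
            framework.Failf("node %q reports no OS image", nodeName)
        }
    }

The hunks below apply exactly this substitution, leaving the log messages and control flow unchanged.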


@ -46,7 +46,6 @@ go_library(
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",


@ -33,7 +33,6 @@ import (
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"github.com/onsi/ginkgo"
@ -86,10 +85,10 @@ var _ = SIGDescribe("LimitRange", func() {
if err == nil {
select {
case listCompleted <- true:
e2elog.Logf("observed the limitRanges list")
framework.Logf("observed the limitRanges list")
return limitRanges, err
default:
e2elog.Logf("channel blocked")
framework.Logf("channel blocked")
}
}
return limitRanges, err
@ -112,13 +111,13 @@ var _ = SIGDescribe("LimitRange", func() {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe limitRange creation : %v", event)
framework.Failf("Failed to observe limitRange creation : %v", event)
}
case <-time.After(e2eservice.RespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange creation")
framework.Failf("Timeout while waiting for LimitRange creation")
}
case <-time.After(e2eservice.RespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange list complete")
framework.Failf("Timeout while waiting for LimitRange list complete")
}
ginkgo.By("Fetching the LimitRange to ensure it has proper values")
@ -141,7 +140,7 @@ var _ = SIGDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
framework.Logf("Pod %+v does not have the expected requirements", pod)
framework.ExpectNoError(err)
}
}
@ -162,7 +161,7 @@ var _ = SIGDescribe("LimitRange", func() {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
framework.Logf("Pod %+v does not have the expected requirements", pod)
framework.ExpectNoError(err)
}
}
@ -212,18 +211,18 @@ var _ = SIGDescribe("LimitRange", func() {
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
if err != nil {
e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
framework.Logf("Unable to retrieve LimitRanges: %v", err)
return false, nil
}
if len(limitRanges.Items) == 0 {
e2elog.Logf("limitRange is already deleted")
framework.Logf("limitRange is already deleted")
return true, nil
}
if len(limitRanges.Items) > 0 {
if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
e2elog.Logf("deletion has not yet been observed")
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
@ -244,12 +243,12 @@ var _ = SIGDescribe("LimitRange", func() {
})
func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests)
if err != nil {
return err
}
e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
err = equalResourceList(expected.Limits, actual.Limits)
return err
}


@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@ -87,25 +86,25 @@ func logOSImages(f *framework.Framework) {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
}
}
func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
e2elog.Logf("Getting list of Nodes from API server")
framework.Logf("Getting list of Nodes from API server")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
for _, node := range nodeList.Items {
if node.Spec.Unschedulable {
continue
}
e2elog.Logf("gpuResourceName %s", gpuResourceName)
framework.Logf("gpuResourceName %s", gpuResourceName)
if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name)
framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
return false
}
}
e2elog.Logf("Nvidia GPUs exist on all schedulable nodes")
framework.Logf("Nvidia GPUs exist on all schedulable nodes")
return true
}
@ -133,34 +132,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
}
gpuResourceName = gpu.NVIDIAGPUResourceName
e2elog.Logf("Using %v", dsYamlURL)
framework.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlURL)
framework.ExpectNoError(err)
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")
framework.Logf("Successfully created daemonset to install Nvidia drivers.")
pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
if err == nil {
e2elog.Logf("Adding deviceplugin addon pod.")
framework.Logf("Adding deviceplugin addon pod.")
pods.Items = append(pods.Items, devicepluginPods.Items...)
}
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}
// Wait for Nvidia GPUs to be available on nodes
e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
gomega.Eventually(func() bool {
return areGPUsAvailableOnAllSchedulableNodes(f)
}, driverInstallTimeout, time.Second).Should(gomega.BeTrue())
@ -182,19 +181,19 @@ func getGPUsPerPod() int64 {
func testNvidiaGPUs(f *framework.Framework) {
rsgather := SetupNVIDIAGPUNode(f, true)
gpuPodNum := getGPUsAvailable(f) / getGPUsPerPod()
e2elog.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
podList := []*v1.Pod{}
for i := int64(0); i < gpuPodNum; i++ {
podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
}
e2elog.Logf("Wait for all test pods to succeed")
framework.Logf("Wait for all test pods to succeed")
// Wait for all pods to succeed
for _, pod := range podList {
f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
logContainers(f, pod)
}
e2elog.Logf("Stopping ResourceUsageGather")
framework.Logf("Stopping ResourceUsageGather")
constraints := make(map[string]framework.ResourceConstraint)
// For now, just gets summary. Can pass valid constraints in the future.
summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
@ -206,7 +205,7 @@ func logContainers(f *framework.Framework, pod *v1.Pod) {
for _, container := range pod.Spec.Containers {
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, container.Name)
framework.ExpectNoError(err, "Should be able to get container logs for container: %s", container.Name)
e2elog.Logf("Got container logs for %s:\n%v", container.Name, logs)
framework.Logf("Got container logs for %s:\n%v", container.Name, logs)
}
}
@ -273,7 +272,7 @@ func StartJob(f *framework.Framework, completions int32) {
ns := f.Namespace.Name
_, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
framework.ExpectNoError(err)
e2elog.Logf("Created job %v", testJob)
framework.Logf("Created job %v", testJob)
}
// VerifyJobNCompletions verifies that the job has completions number of successful pods
@ -283,7 +282,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
framework.ExpectNoError(err)
createdPods := pods.Items
createdPodNames := podNames(createdPods)
e2elog.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
successes := int32(0)
for _, podName := range createdPodNames {
@ -296,7 +295,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
}
}
if successes != completions {
e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
framework.Failf("Only got %v completions. Expected %v completions.", successes, completions)
}
}


@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -95,11 +94,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
@ -109,7 +108,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
e2ekubelet.PrintAllKubeletPods(cs, node.Name)
}
@ -124,7 +123,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
totalPodCapacity = 0
for _, node := range nodeList.Items {
e2elog.Logf("Node: %v", node)
framework.Logf("Node: %v", node)
podCapacity, found := node.Status.Capacity[v1.ResourcePods]
framework.ExpectEqual(found, true)
totalPodCapacity += podCapacity.Value()
@ -144,7 +143,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
}), true, e2elog.Logf))
}), true, framework.Logf))
}
podName := "additional-pod"
WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
@ -179,7 +178,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
e2elog.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
framework.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
}
}
@ -189,9 +188,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ephemeralStoragePerPod = nodeMaxAllocatable / maxNumberOfPods
e2elog.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
framework.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
for name, leftAllocatable := range nodeToAllocatableMap {
e2elog.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
framework.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
podsNeededForSaturation += (int)(leftAllocatable / ephemeralStoragePerPod)
}
@ -214,7 +213,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
},
},
}), true, e2elog.Logf))
}), true, framework.Logf))
}
podName := "additional-pod"
conf := pausePodConfig{
@ -284,7 +283,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
for _, pod := range pods.Items {
_, found := nodeToAllocatableMap[pod.Spec.NodeName]
if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
e2elog.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
}
}
@ -294,7 +293,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
fillerPods := []*v1.Pod{}
for nodeName, cpu := range nodeToAllocatableMap {
requestedCPU := cpu * 7 / 10
e2elog.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
framework.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
Name: "filler-pod-" + string(uuid.NewUUID()),
Resources: &v1.ResourceRequirements{


@ -34,7 +34,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
@ -86,7 +85,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err)
@ -126,7 +125,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Requests: podRes,
},
})
e2elog.Logf("Created pod: %v", pods[i].Name)
framework.Logf("Created pod: %v", pods[i].Name)
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
@ -186,7 +185,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Requests: podRes,
},
})
e2elog.Logf("Created pod: %v", pods[i].Name)
framework.Logf("Created pod: %v", pods[i].Name)
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
@ -254,7 +253,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
framework.ExpectNoError(err)
}()
gomega.Expect(pod.Spec.Priority).NotTo(gomega.BeNil())
e2elog.Logf("Created pod: %v", pod.Name)
framework.Logf("Created pod: %v", pod.Name)
}
})
})
@ -277,11 +276,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
// list existing priorities
priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Unable to list priorities: %v", err)
framework.Logf("Unable to list priorities: %v", err)
} else {
e2elog.Logf("List existing priorities:")
framework.Logf("List existing priorities:")
for _, p := range priorityList.Items {
e2elog.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp)
framework.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp)
}
}
}
@ -306,18 +305,18 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
// find an available node
ginkgo.By("Finding an available node")
nodeName := GetNodeThatCanRunPod(f)
e2elog.Logf("found a healthy node: %s", nodeName)
framework.Logf("found a healthy node: %s", nodeName)
// get the node API object
var err error
node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
e2elog.Failf("error getting node %q: %v", nodeName, err)
framework.Failf("error getting node %q: %v", nodeName, err)
}
var ok bool
nodeHostNameLabel, ok = node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
if !ok {
e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
}
// update Node API object with a fake resource
@ -335,8 +334,8 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal})
_, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
if err != nil {
e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
}
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
}
@ -435,16 +434,16 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
runPauseRS(f, rsConfs[i])
}
e2elog.Logf("pods created so far: %v", podNamesSeen)
e2elog.Logf("length of pods created so far: %v", len(podNamesSeen))
framework.Logf("pods created so far: %v", podNamesSeen)
framework.Logf("length of pods created so far: %v", len(podNamesSeen))
// create ReplicaSet4
// if runPauseRS failed, it means ReplicaSet4 cannot be scheduled even after 1 minute
// which is unacceptable
runPauseRS(f, rsConfs[rsNum-1])
e2elog.Logf("pods created so far: %v", podNamesSeen)
e2elog.Logf("length of pods created so far: %v", len(podNamesSeen))
framework.Logf("pods created so far: %v", podNamesSeen)
framework.Logf("length of pods created so far: %v", len(podNamesSeen))
// count pods number of ReplicaSet{1,2,3}, if it's more than expected replicas
// then it denotes its pods have been over-preempted
@ -463,7 +462,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
for i, got := range rsPodsSeen {
expected := maxRSPodsSeen[i]
if got > expected {
e2elog.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got)
framework.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got)
}
}
})


@ -36,7 +36,6 @@ import (
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -82,7 +81,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
e2enode.WaitForTotalHealthy(cs, time.Minute)
_, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err)
@ -165,7 +164,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
e2elog.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
}
}()
@ -299,7 +298,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
Requests: needCreateResource,
},
NodeName: node.Name,
}), true, e2elog.Logf)
}), true, framework.Logf)
if err != nil {
return err
@ -315,16 +314,16 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
}
func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name)
framework.Logf("ComputeCPUMemFraction for node: %v", node.Name)
totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
totalRequestedMemResource := resource.Requests.Memory().Value()
allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
e2elog.Failf("Expect error of invalid, got : %v", err)
framework.Failf("Expect error of invalid, got : %v", err)
}
for _, pod := range allpods.Items {
if pod.Spec.NodeName == node.Name {
e2elog.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
// Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
continue
@ -350,8 +349,8 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
memFraction = floatOne
}
e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
framework.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
return cpuFraction, memFraction
}


@ -27,7 +27,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -122,7 +121,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()})
if err != nil || len(nodeList.Items) != 1 {
e2elog.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
framework.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
}
node := nodeList.Items[0]
@ -142,7 +141,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
}
if ginkgo.CurrentGinkgoTestDescription().Failed {
e2elog.Failf("Current e2e test has failed, so return from here.")
framework.Failf("Current e2e test has failed, so return from here.")
return
}
@ -159,7 +158,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
e2elog.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
}
ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")
err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30)
@ -191,7 +190,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations)
framework.ExpectNoError(err)
if seconds != 200 {
e2elog.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
framework.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
}
})
})


@ -27,7 +27,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -142,7 +141,7 @@ func createTestController(cs clientset.Interface, observedDeletions chan string,
},
},
)
e2elog.Logf("Starting informer...")
framework.Logf("Starting informer...")
go controller.Run(stopCh)
}
@ -184,7 +183,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
@ -197,9 +196,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
e2elog.Failf("Failed to evict Pod")
framework.Failf("Failed to evict Pod")
case <-observedDeletions:
e2elog.Logf("Noticed Pod eviction. Test successful")
framework.Logf("Noticed Pod eviction. Test successful")
}
})
@ -216,7 +215,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
@ -229,9 +228,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Test successful")
framework.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions:
e2elog.Failf("Pod was evicted despite toleration")
framework.Failf("Pod was evicted despite toleration")
}
})
@ -249,7 +248,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
@ -262,18 +261,18 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted")
framework.Logf("Pod wasn't evicted")
case <-observedDeletions:
e2elog.Failf("Pod was evicted despite toleration")
framework.Failf("Pod was evicted despite toleration")
return
}
ginkgo.By("Waiting for Pod to be deleted")
timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
e2elog.Failf("Pod wasn't evicted")
framework.Failf("Pod wasn't evicted")
case <-observedDeletions:
e2elog.Logf("Pod was evicted after toleration time run out. Test successful")
framework.Logf("Pod was evicted after toleration time run out. Test successful")
return
}
})
@ -295,7 +294,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
// 2. Taint the node running this pod with a no-execute taint
ginkgo.By("Trying to apply a taint on the Node")
@ -314,14 +313,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel := time.NewTimer(additionalWaitPerDeleteSeconds).C
select {
case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Proceeding")
framework.Logf("Pod wasn't evicted. Proceeding")
case <-observedDeletions:
e2elog.Failf("Pod was evicted despite toleration")
framework.Failf("Pod was evicted despite toleration")
return
}
// 4. Remove the taint
e2elog.Logf("Removing taint from Node")
framework.Logf("Removing taint from Node")
framework.RemoveTaintOffNode(cs, nodeName, testTaint)
taintRemoved = true
@ -330,9 +329,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
select {
case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Test successful")
framework.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions:
e2elog.Failf("Pod was evicted despite toleration")
framework.Failf("Pod was evicted despite toleration")
}
})
})
@ -367,10 +366,10 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
ginkgo.By("Starting pods...")
nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
framework.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
nodeName2, err := testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
framework.ExpectNoError(err)
e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName2)
framework.Logf("Pod2 is running on %v. Tainting Node", nodeName2)
ginkgo.By("Trying to apply a taint on the Nodes")
testTaint := getTestTaint()
@ -391,17 +390,17 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
select {
case <-timeoutChannel:
if evicted == 0 {
e2elog.Failf("Failed to evict Pod1.")
framework.Failf("Failed to evict Pod1.")
} else if evicted == 2 {
e2elog.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
framework.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
}
return
case podName := <-observedDeletions:
evicted++
if podName == podGroup+"1" {
e2elog.Logf("Noticed Pod %q gets evicted.", podName)
framework.Logf("Noticed Pod %q gets evicted.", podName)
} else if podName == podGroup+"2" {
e2elog.Failf("Unexepected Pod %q gets evicted.", podName)
framework.Failf("Unexepected Pod %q gets evicted.", podName)
return
}
}
@ -430,10 +429,10 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
framework.ExpectNoError(err)
nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
if !ok {
e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
}
framework.ExpectNoError(err)
e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName)
framework.Logf("Pod1 is running on %v. Tainting Node", nodeName)
// ensure pod2 lands on the same node as pod1
pod2.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeHostNameLabel}
_, err = testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
@ -441,7 +440,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
// Wait for pods to be running state before eviction happens
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod1))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod2))
e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName)
framework.Logf("Pod2 is running on %v. Tainting Node", nodeName)
// 2. Taint the nodes running those pods with a no-execute taint
ginkgo.By("Trying to apply a taint on the Node")
@ -457,10 +456,10 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
for evicted != 2 {
select {
case <-timeoutChannel:
e2elog.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
return
case podName := <-observedDeletions:
e2elog.Logf("Noticed Pod %q gets evicted.", podName)
framework.Logf("Noticed Pod %q gets evicted.", podName)
evicted++
}
}


@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -103,7 +102,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
// Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
// Thus, no need to test for it. Once the precondition changes to zero number of replicas,
// test for replicaCount > 0. Otherwise, StartPods panics.
framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, e2elog.Logf))
framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf))
// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
@ -211,7 +210,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
e2elog.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()
// List the pods, making sure we observe all the replicas.


@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
@ -67,7 +66,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Get all the zones that the nodes are in
expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
framework.ExpectNoError(err)
e2elog.Logf("Expected zones: %v", expectedZones)
framework.Logf("Expected zones: %v", expectedZones)
// Get all the zones in this current region
region := gceCloud.Region()
@ -122,7 +121,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
defer func() {
// Teardown of the compute instance
e2elog.Logf("Deleting compute resource: %v", name)
framework.Logf("Deleting compute resource: %v", name)
err := gceCloud.DeleteInstance(project, zone, name)
framework.ExpectNoError(err)
}()
@ -142,10 +141,10 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Defer the cleanup
defer func() {
e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
if err != nil {
e2elog.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
}
}()
}