Merge pull request #86831 from tanjunchen/remove-unused-code-use-framework

remove unused code and use framework
Kubernetes Prow Robot, 2020-01-05 03:25:40 -08:00 (committed by GitHub)
commit 94fa017ee2 (GPG Key ID: 4AEE18F83AFDEB23)
3 changed files with 33 additions and 45 deletions
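
In short: the change deletes two unused Job helpers (UpdateJob and DeleteJob) and drops the e2efwk import alias in favor of the framework package's own name across the statefulset test utilities. A minimal sketch of the renaming pattern, assuming a file in the statefulset utilities package (logResumed is a hypothetical call site, not code from this PR):

package statefulset

import (
	// Before this PR: e2efwk "k8s.io/kubernetes/test/e2e/framework"
	// The alias is dropped, so the package is used under its own name.
	"k8s.io/kubernetes/test/e2e/framework"
)

// logResumed shows how call sites read after the rename:
// framework.Logf(...) instead of e2efwk.Logf(...).
func logResumed(podName string) {
	framework.Logf("Resumed pod %v", podName)
}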


@@ -41,15 +41,3 @@ func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error)
 func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
 	return c.BatchV1().Jobs(ns).Create(job)
 }
-
-// UpdateJob uses c to updated job in namespace ns. If the returned error is nil, the returned Job is valid and has
-// been updated.
-func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
-	return c.BatchV1().Jobs(ns).Update(job)
-}
-
-// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
-// deleted.
-func DeleteJob(c clientset.Interface, ns, name string) error {
-	return c.BatchV1().Jobs(ns).Delete(name, nil)
-}
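
The UpdateJob and DeleteJob helpers above are removed because nothing in the tree called them. A test that still needs those operations can go through the typed clientset directly; the sketch below mirrors the removed bodies (signatures as in this tree, before client-go methods took a context.Context). updateAndDeleteJob is a hypothetical caller, not part of the PR:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// updateAndDeleteJob inlines what the removed UpdateJob and DeleteJob
// wrappers did: call the typed clientset directly.
func updateAndDeleteJob(c clientset.Interface, ns string, job *batchv1.Job) error {
	// Equivalent of the removed UpdateJob helper.
	updated, err := c.BatchV1().Jobs(ns).Update(job)
	if err != nil {
		return err
	}
	// Equivalent of the removed DeleteJob helper (nil DeleteOptions, as before).
	return c.BatchV1().Jobs(ns).Delete(updated.Name, nil)
}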


@@ -29,7 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	e2efwk "k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -157,17 +157,17 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
 	resumedPod := ""
 	for _, pod := range podList.Items {
 		if pod.Status.Phase != v1.PodRunning {
-			e2efwk.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
+			framework.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
 		}
 		if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
 			continue
 		}
 		if resumedPod != "" {
-			e2efwk.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
+			framework.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
 		}
-		_, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
-		e2efwk.ExpectNoError(err)
-		e2efwk.Logf("Resumed pod %v", pod.Name)
+		_, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
+		framework.ExpectNoError(err)
+		framework.Logf("Resumed pod %v", pod.Name)
 		resumedPod = pod.Name
 	}
 }
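
For context on the helpers being requalified here: framework.Failf fails the running spec immediately, framework.ExpectNoError fails it when err is non-nil, and framework.RunHostCmdWithRetries execs a shell command inside a pod with retries, returning its output. A small usage sketch under those assumptions; the namespace, pod name, poll interval, and timeout below are illustrative, not values from this PR:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// resumeSketch shows the pattern ResumeNextPod uses above: run a command
// in the pod and treat any error as a test failure.
func resumeSketch(ns, pod string) {
	out, err := framework.RunHostCmdWithRetries(ns, pod,
		"dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync",
		10*time.Second, 5*time.Minute) // illustrative poll interval and timeout
	framework.ExpectNoError(err) // fails the spec if err != nil
	framework.Logf("command output: %q", out)
}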


@@ -31,7 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	e2efwk "k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/manifest"
 )
 
@@ -41,20 +41,20 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.StatefulSet {
 		return filepath.Join(manifestPath, file)
 	}
 
-	e2efwk.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
+	framework.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
 	ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
-	e2efwk.ExpectNoError(err)
-	e2efwk.Logf("Parsing service from %v", mkpath("service.yaml"))
+	framework.ExpectNoError(err)
+	framework.Logf("Parsing service from %v", mkpath("service.yaml"))
 	svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 
-	e2efwk.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+	framework.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
 	_, err = c.CoreV1().Services(ns).Create(svc)
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 
-	e2efwk.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+	framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
 	_, err = c.AppsV1().StatefulSets(ns).Create(ss)
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 	WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
 	return ss
 }
 
@@ -62,16 +62,16 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.StatefulSet {
 // GetPodList gets the current Pods in ss.
 func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList {
 	selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 	podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 	return podList
 }
 
 // DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
 func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 	ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 
 	// Scale down each statefulset, then delete it completely.
 	// Deleting a pvc without doing this will leak volumes, #25101.
 
@@ -83,7 +83,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 			errList = append(errList, fmt.Sprintf("%v", err))
 		}
 		WaitForStatusReplicas(c, ss, 0)
-		e2efwk.Logf("Deleting statefulset %v", ss.Name)
+		framework.Logf("Deleting statefulset %v", ss.Name)
 		// Use OrphanDependents=false so it's deleted synchronously.
 		// We already made sure the Pods are gone inside Scale().
 		if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
 
@@ -97,13 +97,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 	pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
 		pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 		if err != nil {
-			e2efwk.Logf("WARNING: Failed to list pvcs, retrying %v", err)
+			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
 			return false, nil
 		}
 		for _, pvc := range pvcList.Items {
 			pvNames.Insert(pvc.Spec.VolumeName)
 			// TODO: Double check that there are no pods referencing the pvc
-			e2efwk.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
+			framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
 			if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
 				return false, nil
 			}
 
@@ -117,7 +117,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
 		pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 		if err != nil {
-			e2efwk.Logf("WARNING: Failed to list pvs, retrying %v", err)
+			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
 			return false, nil
 		}
 		waitingFor := []string{}
 
@@ -129,14 +129,14 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
 		if len(waitingFor) == 0 {
 			return true, nil
 		}
-		e2efwk.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
+		framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
 		return false, nil
 	})
 	if pollErr != nil {
 		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
 	}
 	if len(errList) != 0 {
-		e2efwk.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
+		framework.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
 	}
 }
 
@@ -145,7 +145,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) {
 	name := ss.Name
 	ns := ss.Namespace
 
-	e2efwk.Logf("Scaling statefulset %s to %d", name, count)
+	framework.Logf("Scaling statefulset %s to %d", name, count)
 	ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
 
 	var statefulPodList *v1.PodList
 
@@ -178,7 +178,7 @@ func UpdateReplicas(c clientset.Interface, ss *appsv1.StatefulSet, count int32)
 func Restart(c clientset.Interface, ss *appsv1.StatefulSet) {
 	oldReplicas := *(ss.Spec.Replicas)
 	ss, err := Scale(c, ss, 0)
-	e2efwk.ExpectNoError(err)
+	framework.ExpectNoError(err)
 	// Wait for controller to report the desired number of Pods.
 	// This way we know the controller has observed all Pod deletions
 	// before we scale it back up.
 
@@ -191,7 +191,7 @@ func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error {
 	cmd := "printf $(hostname)"
 	podList := GetPodList(c, ss)
 	for _, statefulPod := range podList.Items {
-		hostname, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
+		hostname, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
 		if err != nil {
 			return err
 		}
 
@@ -221,7 +221,7 @@ func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string)
 
 // CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
 func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
-	e2efwk.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
+	framework.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
 
 	if expectedServiceName != ss.Spec.ServiceName {
 		return fmt.Errorf("wrong service name governing statefulset. Expected %s got %s",
 
@@ -235,8 +235,8 @@ func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error
 func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error {
 	podList := GetPodList(c, ss)
 	for _, statefulPod := range podList.Items {
-		stdout, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
-		e2efwk.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
+		stdout, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
+		framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
 		if err != nil {
 			return err
 		}
 
@@ -249,7 +249,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
 	for i := 0; i < 3; i++ {
 		ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
-			e2efwk.Failf("failed to get statefulset %q: %v", name, err)
+			framework.Failf("failed to get statefulset %q: %v", name, err)
 		}
 		update(ss)
 		ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
 
@@ -257,9 +257,9 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
 			return ss
 		}
 		if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
-			e2efwk.Failf("failed to update statefulset %q: %v", name, err)
+			framework.Failf("failed to update statefulset %q: %v", name, err)
 		}
 	}
-	e2efwk.Failf("too many retries draining statefulset %q", name)
+	framework.Failf("too many retries draining statefulset %q", name)
 	return nil
 }
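
One pattern worth noting in this last file: update() is an optimistic-concurrency retry loop. It re-reads the StatefulSet, reapplies the mutation, and retries the write only when the apiserver reports a Conflict (stale resourceVersion) or a ServerTimeout. A standalone sketch of the same loop, with hypothetical names (updateWithRetries, mutate) and error returns in place of the framework.Failf calls:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// updateWithRetries re-reads the object and reapplies mutate on every
// attempt, so a Conflict caused by a concurrent writer resolves itself
// on the next iteration.
func updateWithRetries(c clientset.Interface, ns, name string, mutate func(*appsv1.StatefulSet)) (*appsv1.StatefulSet, error) {
	var lastErr error
	for i := 0; i < 3; i++ {
		ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		mutate(ss)
		ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
		if err == nil {
			return ss, nil
		}
		// Only conflicts and server timeouts are worth retrying.
		if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
			return nil, err
		}
		lastErr = err
	}
	return nil, lastErr
}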