mirror of https://github.com/k3s-io/kubernetes.git
remove unused code and use framework
commit c2d41fda6c (parent 6be12b8235)
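The change is mechanical: the e2e helpers had been importing the test framework under the alias e2efwk, and this commit drops the alias so the package is used under its own name, framework. A minimal sketch of the resulting style (the package clause and the logScale helper are illustrative, not part of the commit):

    package statefulset

    import (
        // Imported under its own package name; call sites read
        // framework.Xxx instead of the old alias e2efwk.Xxx.
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // logScale mirrors the kind of call sites this commit touches.
    func logScale(name string, count int32) {
        framework.Logf("Scaling statefulset %s to %d", name, count)
    }

The hunks below show the actual changes.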
@@ -41,15 +41,3 @@ func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error)
 func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
     return c.BatchV1().Jobs(ns).Create(job)
 }
-
-// UpdateJob uses c to updated job in namespace ns. If the returned error is nil, the returned Job is valid and has
-// been updated.
-func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
-    return c.BatchV1().Jobs(ns).Update(job)
-}
-
-// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
-// deleted.
-func DeleteJob(c clientset.Interface, ns, name string) error {
-    return c.BatchV1().Jobs(ns).Delete(name, nil)
-}
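The hunk above deletes the unused UpdateJob and DeleteJob helpers; both were thin wrappers over the typed client-go interface. A test that still needs the behavior can call client-go directly, as the removed bodies did. A hedged sketch (the package clause and the function name updateAndDeleteJob are mine; the client calls match the pre-context-aware client-go signatures used in this diff):

    package job

    import (
        batchv1 "k8s.io/api/batch/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // updateAndDeleteJob shows the direct client-go calls that replace the
    // removed wrappers; the function itself is illustrative, not from the commit.
    func updateAndDeleteJob(c clientset.Interface, ns string, job *batchv1.Job) error {
        if _, err := c.BatchV1().Jobs(ns).Update(job); err != nil {
            return err
        }
        // nil DeleteOptions, exactly as the removed DeleteJob helper passed.
        return c.BatchV1().Jobs(ns).Delete(job.Name, nil)
    }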
@@ -29,7 +29,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-    e2efwk "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/framework"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -157,17 +157,17 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
     resumedPod := ""
     for _, pod := range podList.Items {
         if pod.Status.Phase != v1.PodRunning {
-            e2efwk.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
+            framework.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
         }
         if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
             continue
         }
         if resumedPod != "" {
-            e2efwk.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
+            framework.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
         }
-        _, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
-        e2efwk.ExpectNoError(err)
-        e2efwk.Logf("Resumed pod %v", pod.Name)
+        _, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
+        framework.ExpectNoError(err)
+        framework.Logf("Resumed pod %v", pod.Name)
         resumedPod = pod.Name
     }
 }
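ResumeNextPod above depends on the fixture's pause probe: a paused stateful pod's readiness probe blocks until a sentinel file exists, and the dd command writes that file to release exactly one pod per call. A sketch of what such a probe looks like (an assumption for illustration; the actual probe definition lives elsewhere in the fixtures file and may differ in detail):

    package statefulset

    import v1 "k8s.io/api/core/v1"

    // A "pause" readiness probe of the kind hasPauseProbe detects: the pod
    // reports Ready only once /data/statefulset-continue exists, which is
    // the file the dd command in ResumeNextPod creates.
    var pauseProbe = &v1.Probe{
        Handler: v1.Handler{
            Exec: &v1.ExecAction{
                Command: []string{"test", "-f", "/data/statefulset-continue"},
            },
        },
        PeriodSeconds: 1,
    }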
@@ -31,7 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-    e2efwk "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/manifest"
 )
 
@@ -41,20 +41,20 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S
         return filepath.Join(manifestPath, file)
     }
 
-    e2efwk.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
+    framework.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
     ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
-    e2efwk.ExpectNoError(err)
-    e2efwk.Logf("Parsing service from %v", mkpath("service.yaml"))
+    framework.ExpectNoError(err)
+    framework.Logf("Parsing service from %v", mkpath("service.yaml"))
     svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
 
-    e2efwk.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+    framework.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
     _, err = c.CoreV1().Services(ns).Create(svc)
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
 
-    e2efwk.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+    framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
     _, err = c.AppsV1().StatefulSets(ns).Create(ss)
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
     WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
     return ss
 }
@@ -62,16 +62,16 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S
 // GetPodList gets the current Pods in ss.
 func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList {
     selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
     podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
     return podList
 }
 
 // DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
 func DeleteAllStatefulSets(c clientset.Interface, ns string) {
     ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
 
     // Scale down each statefulset, then delete it completely.
     // Deleting a pvc without doing this will leak volumes, #25101.
@@ -83,7 +83,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
             errList = append(errList, fmt.Sprintf("%v", err))
         }
         WaitForStatusReplicas(c, ss, 0)
-        e2efwk.Logf("Deleting statefulset %v", ss.Name)
+        framework.Logf("Deleting statefulset %v", ss.Name)
         // Use OrphanDependents=false so it's deleted synchronously.
         // We already made sure the Pods are gone inside Scale().
         if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
@@ -97,13 +97,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
     pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
         pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
         if err != nil {
-            e2efwk.Logf("WARNING: Failed to list pvcs, retrying %v", err)
+            framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
             return false, nil
         }
         for _, pvc := range pvcList.Items {
             pvNames.Insert(pvc.Spec.VolumeName)
             // TODO: Double check that there are no pods referencing the pvc
-            e2efwk.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
+            framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
             if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
                 return false, nil
             }
@@ -117,7 +117,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
     pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
         pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
         if err != nil {
-            e2efwk.Logf("WARNING: Failed to list pvs, retrying %v", err)
+            framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
             return false, nil
         }
         waitingFor := []string{}
@@ -129,14 +129,14 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
         if len(waitingFor) == 0 {
             return true, nil
         }
-        e2efwk.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
+        framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
         return false, nil
     })
     if pollErr != nil {
         errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
     }
     if len(errList) != 0 {
-        e2efwk.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
+        framework.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
     }
 }
 
@@ -145,7 +145,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.
     name := ss.Name
     ns := ss.Namespace
 
-    e2efwk.Logf("Scaling statefulset %s to %d", name, count)
+    framework.Logf("Scaling statefulset %s to %d", name, count)
     ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
 
     var statefulPodList *v1.PodList
@@ -178,7 +178,7 @@ func UpdateReplicas(c clientset.Interface, ss *appsv1.StatefulSet, count int32)
 func Restart(c clientset.Interface, ss *appsv1.StatefulSet) {
     oldReplicas := *(ss.Spec.Replicas)
     ss, err := Scale(c, ss, 0)
-    e2efwk.ExpectNoError(err)
+    framework.ExpectNoError(err)
     // Wait for controller to report the desired number of Pods.
     // This way we know the controller has observed all Pod deletions
     // before we scale it back up.
@@ -191,7 +191,7 @@ func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error {
     cmd := "printf $(hostname)"
     podList := GetPodList(c, ss)
     for _, statefulPod := range podList.Items {
-        hostname, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
+        hostname, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
         if err != nil {
             return err
         }
@@ -221,7 +221,7 @@ func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string)
 
 // CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
 func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
-    e2efwk.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
+    framework.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
 
     if expectedServiceName != ss.Spec.ServiceName {
         return fmt.Errorf("wrong service name governing statefulset. Expected %s got %s",
@@ -235,8 +235,8 @@ func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error
 func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error {
     podList := GetPodList(c, ss)
     for _, statefulPod := range podList.Items {
-        stdout, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
-        e2efwk.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
+        stdout, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
+        framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
         if err != nil {
             return err
         }
@@ -249,7 +249,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State
     for i := 0; i < 3; i++ {
         ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
         if err != nil {
-            e2efwk.Failf("failed to get statefulset %q: %v", name, err)
+            framework.Failf("failed to get statefulset %q: %v", name, err)
         }
         update(ss)
         ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
@@ -257,9 +257,9 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State
             return ss
         }
         if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
-            e2efwk.Failf("failed to update statefulset %q: %v", name, err)
+            framework.Failf("failed to update statefulset %q: %v", name, err)
         }
     }
-    e2efwk.Failf("too many retries draining statefulset %q", name)
+    framework.Failf("too many retries draining statefulset %q", name)
     return nil
 }
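The update helper in the last hunk is a standard optimistic-concurrency loop: Get, mutate, Update, and retry only when the error is a Conflict or ServerTimeout that a fresh Get can resolve. The same pattern reduced to a skeleton, with the test framework's Failf calls swapped for returned errors (illustrative only; retryUpdate is not the commit's code):

    package statefulset

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // retryUpdate re-reads the object and reapplies mutate until the Update
    // succeeds or the error is not a retryable races-with-the-server case.
    func retryUpdate(c clientset.Interface, ns, name string, mutate func(*appsv1.StatefulSet)) (*appsv1.StatefulSet, error) {
        for i := 0; i < 3; i++ {
            ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return nil, err
            }
            mutate(ss)
            ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
            if err == nil {
                return ss, nil
            }
            // Retry only the races a fresh Get can resolve.
            if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
                return nil, err
            }
        }
        return nil, fmt.Errorf("giving up updating statefulset %q after 3 retries", name)
    }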