Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Cleanup unused test functions (continued)

Following up on the work started in 0c0bd6d, this further cleans up the test/utils directory by removing unused functions.
commit ce01dfc492
parent 89283e0219
@@ -128,55 +128,6 @@ func CheckAuditLinesFiltered(stream io.Reader, expected []AuditEvent, version sc
    return missingReport, nil
}

// CheckAuditList searches an audit event list for the expected audit events.
func CheckAuditList(el auditinternal.EventList, expected []AuditEvent) (missing []AuditEvent, err error) {
    expectations := newAuditEventTracker(expected)

    for _, e := range el.Items {
        event, err := testEventFromInternal(&e)
        if err != nil {
            return expected, err
        }

        expectations.Mark(event)
    }

    return expectations.Missing(), nil
}

// CheckForDuplicates checks a list for duplicate events
func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, error) {
    // existingEvents holds a slice of audit events that have been seen
    existingEvents := []AuditEvent{}
    duplicates := auditinternal.EventList{}
    for _, e := range el.Items {
        event, err := testEventFromInternal(&e)
        if err != nil {
            return duplicates, err
        }
        event.ID = e.AuditID
        for _, existing := range existingEvents {
            if reflect.DeepEqual(existing, event) {
                duplicates.Items = append(duplicates.Items, e)
                continue
            }
        }
        existingEvents = append(existingEvents, event)
    }

    var err error
    if len(duplicates.Items) > 0 {
        err = fmt.Errorf("failed duplicate check")
    }

    return duplicates, err
}

// testEventFromInternal takes an internal audit event and returns a test event
func testEventFromInternal(e *auditinternal.Event) (AuditEvent, error) {
    return testEventFromInternalFiltered(e, nil)
}

// testEventFromInternalFiltered takes an internal audit event and returns a test event, customAnnotationsFilter
// controls which audit annotations are added to AuditEvent.CustomAuditAnnotations.
// If the customAnnotationsFilter is nil, AuditEvent.CustomAuditAnnotations will be empty.
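For context, the removed CheckAuditList helper was a plain search over an audit event list. A hypothetical test-side caller might have looked roughly like the sketch below; the checkExpectedAuditEvents wrapper, its arguments, and the failure messages are illustrative assumptions, not code from this repository.

// Hypothetical sketch: fail a test when expected audit events are absent.
func checkExpectedAuditEvents(t *testing.T, list auditinternal.EventList, expected []AuditEvent) {
    missing, err := CheckAuditList(list, expected)
    if err != nil {
        t.Fatalf("error while checking audit list: %v", err)
    }
    if len(missing) > 0 {
        t.Errorf("missing %d expected audit events: %v", len(missing), missing)
    }
}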
@@ -103,13 +103,3 @@ func TerminatedContainers(pod *v1.Pod) map[string]string {
    }
    return states
}

// PodNotReady checks whether pod p's has a ready condition of status false.
func PodNotReady(p *v1.Pod) (bool, error) {
    // Check the ready condition is false.
    if podutil.IsPodReady(p) {
        return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
            p.ObjectMeta.Name, p.Spec.NodeName, v1.PodReady, v1.ConditionFalse, p.Status.Conditions)
    }
    return true, nil
}
@@ -24,9 +24,6 @@ import (
    "time"

    apps "k8s.io/api/apps/v1"
    batch "k8s.io/api/batch/v1"
    storage "k8s.io/api/storage/v1"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -121,74 +118,6 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a
    return RetryWithExponentialBackOff(createFunc)
}

func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *apps.DaemonSet) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Job) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Secret) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.Service) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
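All of the removed Create*WithRetries helpers repeat one shape: a generateName conflict is retriable, success or an AlreadyExists error counts as done, and anything else aborts the retry loop. As a rough illustration only, assuming Go generics and the same isGenerateNameConflict and RetryWithExponentialBackOff helpers (this is not code from the repository), the shared pattern could be factored like this:

// createWithRetries is a hypothetical generic form of the repeated helpers above.
// createFn performs a single create attempt against the API server.
func createWithRetries[T any](meta metav1.ObjectMeta, createFn func() (T, error)) error {
    attempt := func() (bool, error) {
        _, err := createFn()
        if isGenerateNameConflict(meta, err) {
            return false, nil // generated name collided: retry
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil // created (or already present): done
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(attempt)
}

A caller would wrap a single API call, for example createWithRetries(obj.ObjectMeta, func() (*v1.Secret, error) { return c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) }).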
@@ -206,40 +135,6 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S
    return RetryWithExponentialBackOff(createFunc)
}

func CreateStorageClassWithRetries(c clientset.Interface, obj *storage.StorageClass) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj *v1.ResourceQuota) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
        if isGenerateNameConflict(obj.ObjectMeta, err) {
            return false, nil
        }
        if err == nil || apierrors.IsAlreadyExists(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.PersistentVolume) error {
    if obj == nil {
        return fmt.Errorf("object provided to create is empty")
@@ -22,7 +22,6 @@ import (
    "context"
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
@@ -56,14 +55,3 @@ func DeleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
        return fmt.Errorf("unsupported kind when deleting: %v", kind)
    }
}

func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
    deleteFunc := func() (bool, error) {
        err := DeleteResource(c, kind, namespace, name, options)
        if err == nil || apierrors.IsNotFound(err) {
            return true, nil
        }
        return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(deleteFunc)
}
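DeleteResourceWithRetries simply wrapped DeleteResource in the same exponential-backoff loop, treating NotFound as success. A hypothetical call site follows; the client, namespace, name, and kind constructor below are illustrative assumptions, and the kind must be one DeleteResource supports.

// Sketch: delete a ReplicationController by kind, tolerating NotFound.
if err := DeleteResourceWithRetries(client, api.Kind("ReplicationController"),
    "e2e-tests", "my-rc", metav1.DeleteOptions{}); err != nil {
    t.Fatalf("deleting replication controller: %v", err)
}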
@@ -328,25 +328,6 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
    }, desiredGeneration, 2*time.Second, 1*time.Minute)
}

// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
    err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
        deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        // Rollback not set or is kicked off
        if deployment.Annotations[apps.DeprecatedRollbackTo] == "" {
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
    }
    return nil
}

// WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
    var deployment *apps.Deployment
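WaitForDeploymentRollbackCleared polled until the deprecated rollbackTo annotation was gone from the deployment. A hypothetical caller only had to pick a poll interval and timeout; the client, namespace, deployment name, and durations below are illustrative.

// Sketch: wait up to a minute for the rollback annotation to clear.
if err := WaitForDeploymentRollbackCleared(client, "default", "my-deployment",
    time.Second, time.Minute); err != nil {
    t.Fatalf("rollback annotation was not cleared: %v", err)
}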
@@ -67,26 +67,3 @@ func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaS
    }
    return nil
}

func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.ReplicaSet, error) {
    var rs *apps.ReplicaSet
    var updateErr error
    pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
        var err error
        if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
            return false, err
        }
        // Apply the update, then attempt to push it to the apiserver.
        applyUpdate(rs)
        if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err == nil {
            logf("Updating replica set %q", name)
            return true, nil
        }
        updateErr = err
        return false, nil
    })
    if wait.Interrupted(pollErr) {
        pollErr = fmt.Errorf("couldn't apply the provided update to replicaset %q: %v", name, updateErr)
    }
    return rs, pollErr
}
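UpdateReplicaSetStatusWithRetries took the update as a closure and a printf-style logger, repeating the get/modify/UpdateStatus cycle until it succeeded or the poll timed out. A hypothetical call site follows; the names, replica count, and durations are illustrative.

// Sketch: bump a ReplicaSet's status replicas with retries (hypothetical call site).
rs, err := UpdateReplicaSetStatusWithRetries(client, "default", "my-rs",
    func(rs *apps.ReplicaSet) { rs.Status.Replicas = 3 }, // applyUpdate
    t.Logf,                                               // logf
    time.Second, time.Minute)                             // pollInterval, pollTimeout
if err != nil {
    t.Fatalf("updating ReplicaSet status: %v", err)
}
_ = rs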
@@ -40,12 +40,9 @@ import (
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/client-go/util/workqueue"
    api "k8s.io/kubernetes/pkg/apis/core"
    extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
    imageutils "k8s.io/kubernetes/test/utils/image"
    "k8s.io/utils/pointer"
@@ -299,22 +296,6 @@ func RunDeployment(ctx context.Context, config DeploymentConfig) error {
    return config.start(ctx)
}

func (config *DeploymentConfig) Run(ctx context.Context) error {
    return RunDeployment(ctx, *config)
}

func (config *DeploymentConfig) GetKind() schema.GroupKind {
    return extensionsinternal.Kind("Deployment")
}

func (config *DeploymentConfig) GetGroupResource() schema.GroupResource {
    return extensionsinternal.Resource("deployments")
}

func (config *DeploymentConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return extensionsinternal.SchemeGroupVersion.WithResource("deployments")
}

func (config *DeploymentConfig) create() error {
    deployment := &apps.Deployment{
        ObjectMeta: metav1.ObjectMeta{
@@ -386,22 +367,6 @@ func RunReplicaSet(ctx context.Context, config ReplicaSetConfig) error {
    return config.start(ctx)
}

func (config *ReplicaSetConfig) Run(ctx context.Context) error {
    return RunReplicaSet(ctx, *config)
}

func (config *ReplicaSetConfig) GetKind() schema.GroupKind {
    return extensionsinternal.Kind("ReplicaSet")
}

func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource {
    return extensionsinternal.Resource("replicasets")
}

func (config *ReplicaSetConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return extensionsinternal.SchemeGroupVersion.WithResource("replicasets")
}

func (config *ReplicaSetConfig) create() error {
    rs := &apps.ReplicaSet{
        ObjectMeta: metav1.ObjectMeta{
@@ -469,55 +434,6 @@ func RunRC(ctx context.Context, config RCConfig) error {
    return config.start(ctx)
}

func (config *RCConfig) Run(ctx context.Context) error {
    return RunRC(ctx, *config)
}

func (config *RCConfig) GetName() string {
    return config.Name
}

func (config *RCConfig) GetNamespace() string {
    return config.Namespace
}

func (config *RCConfig) GetKind() schema.GroupKind {
    return api.Kind("ReplicationController")
}

func (config *RCConfig) GetGroupResource() schema.GroupResource {
    return api.Resource("replicationcontrollers")
}

func (config *RCConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return api.SchemeGroupVersion.WithResource("replicationcontrollers")
}

func (config *RCConfig) GetClient() clientset.Interface {
    return config.Client
}

func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter {
    return config.ScalesGetter
}

func (config *RCConfig) SetClient(c clientset.Interface) {
    config.Client = c
}

func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) {
    config.ScalesGetter = getter
}

func (config *RCConfig) GetReplicas() int {
    return config.Replicas
}

func (config *RCConfig) GetLabelValue(key string) (string, bool) {
    value, found := config.Labels[key]
    return value, found
}

func (config *RCConfig) create() error {
    dnsDefault := v1.DNSDefault
    if config.DNSPolicy == nil {
@@ -1505,93 +1421,3 @@ func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name stri
        },
    })
}

type DaemonConfig struct {
    Client    clientset.Interface
    Name      string
    Namespace string
    Image     string
    // If set this function will be used to print log lines instead of klog.
    LogFunc func(fmt string, args ...interface{})
    // How long we wait for DaemonSet to become running.
    Timeout time.Duration
}

func (config *DaemonConfig) Run(ctx context.Context) error {
    if config.Image == "" {
        config.Image = imageutils.GetE2EImage(imageutils.Pause)
    }
    nameLabel := map[string]string{
        "name": config.Name + "-daemon",
    }
    daemon := &apps.DaemonSet{
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Name,
        },
        Spec: apps.DaemonSetSpec{
            Template: v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: nameLabel,
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:  config.Name,
                            Image: config.Image,
                        },
                    },
                },
            },
        },
    }

    if err := CreateDaemonSetWithRetries(config.Client, config.Namespace, daemon); err != nil {
        return fmt.Errorf("error creating daemonset: %v", err)
    }

    var nodes *v1.NodeList
    var err error
    for i := 0; i < retries; i++ {
        // Wait for all daemons to be running
        nodes, err = config.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
        if err == nil {
            break
        } else if i+1 == retries {
            return fmt.Errorf("error listing Nodes while waiting for DaemonSet %v: %v", config.Name, err)
        }
    }

    timeout := config.Timeout
    if timeout <= 0 {
        timeout = 5 * time.Minute
    }

    ps, err := NewPodStore(config.Client, config.Namespace, labels.SelectorFromSet(nameLabel), fields.Everything())
    if err != nil {
        return err
    }
    defer ps.Stop()

    err = wait.Poll(time.Second, timeout, func() (bool, error) {
        pods := ps.List()

        nodeHasDaemon := sets.NewString()
        for _, pod := range pods {
            podReady, _ := PodRunningReady(pod)
            if pod.Spec.NodeName != "" && podReady {
                nodeHasDaemon.Insert(pod.Spec.NodeName)
            }
        }

        running := len(nodeHasDaemon)
        config.LogFunc("Found %v/%v Daemons %v running", running, config.Name, len(nodes.Items))
        return running == len(nodes.Items), nil
    })
    if err != nil {
        config.LogFunc("Timed out while waiting for DaemonSet %v/%v to be running.", config.Namespace, config.Name)
    } else {
        config.LogFunc("Created Daemon %v/%v", config.Namespace, config.Name)
    }

    return err
}
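The removed DaemonConfig.Run helper was self-contained: it defaulted the image to the pause container, created the DaemonSet via CreateDaemonSetWithRetries, and then polled until every node reported a ready pod. A hypothetical invocation is sketched below; the client, names, namespace, and timeout are illustrative.

// Sketch: run the removed DaemonConfig helper in a test namespace.
dc := &DaemonConfig{
    Client:    client,
    Name:      "probe",
    Namespace: "e2e-tests",
    LogFunc:   t.Logf,
    Timeout:   2 * time.Minute,
}
if err := dc.Run(ctx); err != nil {
    t.Fatalf("daemon pods did not become ready: %v", err)
}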