Add --chunk-size support to kubectl describe

Katrina Verey 2021-04-20 11:24:35 -07:00
parent 2342ac3cfa
commit 8ad6fd6ddb
13 changed files with 344 additions and 68 deletions
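In short: kubectl describe gains a --chunk-size flag, defaulting to cmdutil.DefaultChunkSize (500, as the new test helper's limit=500 checks confirm). DescriberSettings carries the value into each describer, the direct Events(...).Search calls are replaced by a paginated searchEvents helper built on runtimeresource.FollowContinue, and the pod and replica-set listing helpers gain chunked variants.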

View File

@ -260,6 +260,62 @@ kube::test::describe_resource_events_assert() {
fi
}
kube::test::describe_resource_chunk_size_assert() {
# $1: the target resource
local resource=$1
# $2: comma-separated list of additional resources that will be listed
local additionalResources=${2:-}
# $3: [optional] flags to pass to kubectl
local args=${3:-}
# Expect list requests for the target resource and the additional resources
local expectLists
IFS="," read -r -a expectLists <<< "${resource},${additionalResources}"
# Default chunk size
defaultResult=$(eval kubectl describe "${resource}" --show-events=true -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
for r in "${expectLists[@]}"; do
if grep -q "${r}?.*limit=500" <<< "${defaultResult}"; then
echo "query for ${r} had limit param"
else
echo "${bold}${red}"
echo "FAIL!"
echo "Describe ${resource}"
echo " Expected limit param on request for: ${r}"
echo " Not found in:"
echo "${defaultResult}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
done
# Try a non-default chunk size
customResult=$(eval kubectl describe "${resource}" --show-events=false --chunk-size=10 -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
if grep -q "${resource}?limit=10" <<< "${customResult}"; then
echo "query for ${resource} had user-specified limit param"
else
echo "${bold}${red}"
echo "FAIL!"
echo "Describe ${resource}"
echo " Expected limit param on request for: ${r}"
echo " Not found in:"
echo "${customResult}"
echo "${reset}${red}"
caller
echo "${reset}"
return 1
fi
echo -n "${green}"
echo "Successful describe ${resource} verbose logs:"
echo "${defaultResult}"
echo -n "${reset}"
return 0
}
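The helper relies on kubectl's -v=6 request logging, which records every HTTP GET together with its query string: the default invocation must show limit=500 on the list request for the target resource and for each additional resource, and the --chunk-size=10 invocation must show limit=10 on the target resource.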
# Compare sort-by resource name output (first column, skipping first line) with the expected order specified in the last parameter
kube::test::if_sort_by_has_correct_order() {
local var

View File

@ -92,6 +92,7 @@ func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions.
FilenameOptions: &resource.FilenameOptions{},
DescriberSettings: &describe.DescriberSettings{
ShowEvents: true,
ChunkSize: cmdutil.DefaultChunkSize,
},
CmdParent: parent,
@ -115,6 +116,7 @@ func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions.
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
cmd.Flags().BoolVarP(&o.AllNamespaces, "all-namespaces", "A", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.")
cmd.Flags().BoolVar(&o.DescriberSettings.ShowEvents, "show-events", o.DescriberSettings.ShowEvents, "If true, display events related to the described object.")
cmdutil.AddChunkSizeFlag(cmd, &o.DescriberSettings.ChunkSize)
return cmd
}
@ -156,6 +158,7 @@ func (o *DescribeOptions) Run() error {
FilenameParam(o.EnforceNamespace, o.FilenameOptions).
LabelSelectorParam(o.Selector).
ResourceTypeOrNameArgs(true, o.BuilderArgs...).
RequestChunksOf(o.DescriberSettings.ChunkSize).
Flatten().
Do()
err := r.Err()
@ -220,6 +223,7 @@ func (o *DescribeOptions) DescribeMatchingResources(originalError error, resourc
NamespaceParam(o.Namespace).DefaultNamespace().
ResourceTypeOrNameArgs(true, resource).
SingleResourceType().
RequestChunksOf(o.DescriberSettings.ChunkSize).
Flatten().
Do()
mapping, err := r.ResourceMapping()
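For context, here is a minimal sketch (not part of this commit) of what RequestChunksOf contributes to a cli-runtime builder chain; listPodsInChunks and its hard-coded resource name are illustrative only:

package main

import (
	"fmt"

	"k8s.io/cli-runtime/pkg/resource"
)

// listPodsInChunks lists pods via a resource.Builder; RequestChunksOf makes
// each underlying GET carry limit=<chunkSize>, with continue tokens followed
// automatically until the full list has been retrieved.
func listPodsInChunks(getter resource.RESTClientGetter, namespace string, chunkSize int64) error {
	r := resource.NewBuilder(getter).
		Unstructured().
		NamespaceParam(namespace).DefaultNamespace().
		ResourceTypeOrNameArgs(true, "pods").
		RequestChunksOf(chunkSize).
		Flatten().
		Do()
	infos, err := r.Infos()
	if err != nil {
		return err
	}
	fmt.Printf("listed %d pods\n", len(infos))
	return nil
}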

View File

@ -216,6 +216,32 @@ func TestDescribeObjectSkipEvents(t *testing.T) {
}
}
func TestDescribeObjectChunkSize(t *testing.T) {
d := &testDescriber{Output: "test output"}
oldFn := describe.DescriberFn
defer func() {
describe.DescriberFn = oldFn
}()
describe.DescriberFn = d.describerFor
pods, _, _ := cmdtesting.TestData()
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
tf.UnstructuredClient = &fake.RESTClient{
NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)},
}
cmd := NewCmdDescribe("kubectl", tf, genericclioptions.NewTestIOStreamsDiscard())
cmd.Flags().Set("chunk-size", "100")
cmd.Run(cmd, []string{"pods"})
if d.Settings.ChunkSize != 100 {
t.Errorf("ChunkSize = 100 expected, got ChunkSize = %v", d.Settings.ChunkSize)
}
}
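Note that the test swaps describe.DescriberFn for a stub, so the ChunkSize that actually reaches the describer can be asserted without a live API server.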
func TestDescribeHelpMessage(t *testing.T) {
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()

View File

@ -33,6 +33,7 @@ import (
"unicode"
"github.com/fatih/camelcase"
"k8s.io/apimachinery/pkg/runtime"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
@ -65,6 +66,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/cli-runtime/pkg/genericclioptions"
runtimeresource "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
@ -284,7 +286,7 @@ func (g *genericDescriber) Describe(namespace, name string, describerSettings De
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj)
events, _ = searchEvents(g.events, obj, describerSettings.ChunkSize)
}
return tabbedString(func(out io.Writer) error {
@ -403,7 +405,17 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings
if err != nil {
return "", err
}
resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), metav1.ListOptions{})
resourceQuotaList := &corev1.ResourceQuotaList{}
err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize},
func(options metav1.ListOptions) (runtime.Object, error) {
newList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourceQuotas.String())
}
resourceQuotaList.Items = append(resourceQuotaList.Items, newList.Items...)
return newList, nil
})
if err != nil {
if errors.IsNotFound(err) {
// Server does not support resource quotas.
@ -413,7 +425,17 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings
return "", err
}
}
limitRangeList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), metav1.ListOptions{})
limitRangeList := &corev1.LimitRangeList{}
err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize},
func(options metav1.ListOptions) (runtime.Object, error) {
newList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, "limitranges")
}
limitRangeList.Items = append(limitRangeList.Items, newList.Items...)
return newList, nil
})
if err != nil {
if errors.IsNotFound(err) {
// Server does not support limit ranges.
@ -674,9 +696,22 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings Descri
if describerSettings.ShowEvents {
eventsInterface := d.CoreV1().Events(namespace)
selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
options := metav1.ListOptions{FieldSelector: selector.String()}
events, err2 := eventsInterface.List(context.TODO(), options)
if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 {
initialOpts := metav1.ListOptions{
FieldSelector: selector.String(),
Limit: describerSettings.ChunkSize,
}
events := &corev1.EventList{}
err2 := runtimeresource.FollowContinue(&initialOpts,
func(options metav1.ListOptions) (runtime.Object, error) {
newList, err := eventsInterface.List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, "events")
}
events.Items = append(events.Items, newList.Items...)
return newList, nil
})
if err2 == nil && len(events.Items) > 0 {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err)
@ -697,7 +732,7 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings Descri
if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod {
ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey])
}
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref)
events, _ = searchEvents(d.CoreV1(), ref, describerSettings.ChunkSize)
}
}
@ -1382,7 +1417,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv)
events, _ = searchEvents(d.CoreV1(), pv, describerSettings.ChunkSize)
}
return describePersistentVolume(pv, events)
@ -1530,18 +1565,18 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri
pc := d.CoreV1().Pods(namespace)
pods, err := getPodsForPVC(pc, pvc.Name)
pods, err := getPodsForPVC(pc, pvc.Name, describerSettings)
if err != nil {
return "", err
}
events, _ := d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc)
events, _ := searchEvents(d.CoreV1(), pvc, describerSettings.ChunkSize)
return describePersistentVolumeClaim(pvc, events, pods)
}
func getPodsForPVC(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) {
nsPods, err := c.List(context.TODO(), metav1.ListOptions{})
func getPodsForPVC(c corev1client.PodInterface, pvcName string, settings DescriberSettings) ([]corev1.Pod, error) {
nsPods, err := getPodsInChunks(c, metav1.ListOptions{Limit: settings.ChunkSize})
if err != nil {
return []corev1.Pod{}, err
}
@ -2000,14 +2035,15 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri
return "", err
}
running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID)
selector := labels.SelectorFromSet(controller.Spec.Selector)
running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, controller.UID, describerSettings)
if err != nil {
return "", err
}
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller)
events, _ = searchEvents(d.CoreV1(), controller, describerSettings.ChunkSize)
}
return describeReplicationController(controller, events, running, waiting, succeeded, failed)
@ -2080,11 +2116,11 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings
return "", err
}
running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID)
running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID, describerSettings)
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs)
events, _ = searchEvents(d.CoreV1(), rs, describerSettings.ChunkSize)
}
return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr)
@ -2136,7 +2172,7 @@ func (d *JobDescriber) Describe(namespace, name string, describerSettings Descri
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job)
events, _ = searchEvents(d.CoreV1(), job, describerSettings.ChunkSize)
}
return describeJob(job, events)
@ -2219,7 +2255,7 @@ func (d *CronJobDescriber) Describe(namespace, name string, describerSettings De
cronJob, err := d.client.BatchV1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob)
events, _ = searchEvents(d.client.CoreV1(), cronJob, describerSettings.ChunkSize)
}
return describeCronJob(cronJob, events)
}
@ -2230,7 +2266,7 @@ func (d *CronJobDescriber) Describe(namespace, name string, describerSettings De
return "", err
}
if describerSettings.ShowEvents {
events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJobBeta)
events, _ = searchEvents(d.client.CoreV1(), cronJobBeta, describerSettings.ChunkSize)
}
return describeCronJobBeta(cronJobBeta, events)
}
@ -2399,14 +2435,14 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings
if err != nil {
return "", err
}
running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID)
running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID, describerSettings)
if err != nil {
return "", err
}
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon)
events, _ = searchEvents(d.CoreV1(), daemon, describerSettings.ChunkSize)
}
return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
@ -2490,14 +2526,14 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings De
netV1, err := i.client.NetworkingV1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = i.client.CoreV1().Events(namespace).Search(scheme.Scheme, netV1)
events, _ = searchEvents(i.client.CoreV1(), netV1, describerSettings.ChunkSize)
}
return i.describeIngressV1(netV1, events)
}
netV1beta1, err := i.client.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = i.client.CoreV1().Events(namespace).Search(scheme.Scheme, netV1beta1)
events, _ = searchEvents(i.client.CoreV1(), netV1beta1, describerSettings.ChunkSize)
}
return i.describeIngressV1beta1(netV1beta1, events)
}
@ -2703,14 +2739,14 @@ func (i *IngressClassDescriber) Describe(namespace, name string, describerSettin
netV1, err := i.client.NetworkingV1().IngressClasses().Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = i.client.CoreV1().Events(namespace).Search(scheme.Scheme, netV1)
events, _ = searchEvents(i.client.CoreV1(), netV1, describerSettings.ChunkSize)
}
return i.describeIngressClassV1(netV1, events)
}
netV1beta1, err := i.client.NetworkingV1beta1().IngressClasses().Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = i.client.CoreV1().Events(namespace).Search(scheme.Scheme, netV1beta1)
events, _ = searchEvents(i.client.CoreV1(), netV1beta1, describerSettings.ChunkSize)
}
return i.describeIngressClassV1beta1(netV1beta1, events)
}
@ -2779,7 +2815,7 @@ func (d *ServiceDescriber) Describe(namespace, name string, describerSettings De
endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service)
events, _ = searchEvents(d.CoreV1(), service, describerSettings.ChunkSize)
}
return describeService(service, endpoints, events)
}
@ -2898,7 +2934,7 @@ func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep)
events, _ = searchEvents(d.CoreV1(), ep, describerSettings.ChunkSize)
}
return describeEndpoints(ep, events)
@ -2970,7 +3006,7 @@ func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSetti
epsV1, err := d.DiscoveryV1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, epsV1)
events, _ = searchEvents(d.CoreV1(), epsV1, describerSettings.ChunkSize)
}
return describeEndpointSliceV1(epsV1, events)
}
@ -2981,7 +3017,7 @@ func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSetti
}
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, epsV1beta1)
events, _ = searchEvents(d.CoreV1(), epsV1beta1, describerSettings.ChunkSize)
}
return describeEndpointSliceV1beta1(epsV1beta1, events)
@ -3159,7 +3195,16 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett
// missingSecrets is the set of all secrets present in the
// serviceAccount but not present in the set of existing secrets.
missingSecrets := sets.NewString()
secrets, err := d.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
secrets := corev1.SecretList{}
err = runtimeresource.FollowContinue(&metav1.ListOptions{Limit: describerSettings.ChunkSize},
func(options metav1.ListOptions) (runtime.Object, error) {
newList, err := d.CoreV1().Secrets(namespace).List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourceSecrets.String())
}
secrets.Items = append(secrets.Items, newList.Items...)
return newList, nil
})
// errors are tolerated here in order to describe the serviceAccount with all
// of the secrets that it references, even if those secrets cannot be fetched.
@ -3193,7 +3238,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount)
events, _ = searchEvents(d.CoreV1(), serviceAccount, describerSettings.ChunkSize)
}
return describeServiceAccount(serviceAccount, tokens, missingSecrets, events)
@ -3436,7 +3481,11 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings Descr
// in a policy aware setting, users may have access to a node, but not all pods
// in that case, we note that the user does not have access to the pods
canViewPods := true
nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()})
initialOpts := metav1.ListOptions{
FieldSelector: fieldSelector.String(),
Limit: describerSettings.ChunkSize,
}
nodeNonTerminatedPodsList, err := getPodsInChunks(d.CoreV1().Pods(namespace), initialOpts)
if err != nil {
if !errors.IsForbidden(err) {
return "", err
@ -3451,7 +3500,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings Descr
} else {
// TODO: We haven't decided the namespace for Node object yet.
ref.UID = types.UID(ref.Name)
events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref)
events, _ = searchEvents(d.CoreV1(), ref, describerSettings.ChunkSize)
}
}
@ -3596,14 +3645,14 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting
return "", err
}
running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID)
running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID, describerSettings)
if err != nil {
return "", err
}
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, ps)
events, _ = searchEvents(p.client.CoreV1(), ps, describerSettings.ChunkSize)
}
return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed)
@ -3664,7 +3713,7 @@ func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de
signerName = csr.Spec.SignerName
username = csr.Spec.Username
if describerSettings.ShowEvents {
events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr)
events, _ = searchEvents(p.client.CoreV1(), csr, describerSettings.ChunkSize)
}
} else if csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}); err == nil {
crBytes = csr.Spec.Request
@ -3679,7 +3728,7 @@ func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de
}
username = csr.Spec.Username
if describerSettings.ShowEvents {
events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr)
events, _ = searchEvents(p.client.CoreV1(), csr, describerSettings.ChunkSize)
}
} else {
return "", err
@ -3764,7 +3813,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc
hpaV2beta2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV2beta2)
events, _ = searchEvents(d.client.CoreV1(), hpaV2beta2, describerSettings.ChunkSize)
}
return describeHorizontalPodAutoscalerV2beta2(hpaV2beta2, events, d)
}
@ -3772,7 +3821,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc
hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
if describerSettings.ShowEvents {
events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV1)
events, _ = searchEvents(d.client.CoreV1(), hpaV1, describerSettings.ChunkSize)
}
return describeHorizontalPodAutoscalerV1(hpaV1, events, d)
}
@ -4115,19 +4164,28 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting
if err != nil {
return "", err
}
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = searchEvents(dd.client.CoreV1(), d, describerSettings.ChunkSize)
}
var oldRSs, newRSs []*appsv1.ReplicaSet
if oldResult, _, newResult, err := deploymentutil.GetAllReplicaSetsInChunks(d, dd.client.AppsV1(), describerSettings.ChunkSize); err == nil {
oldRSs = oldResult
if newResult != nil {
newRSs = append(newRSs, newResult)
}
}
return describeDeployment(d, oldRSs, newRSs, events)
}
func describeDeployment(d *appsv1.Deployment, oldRSs []*appsv1.ReplicaSet, newRSs []*appsv1.ReplicaSet, events *corev1.EventList) (string, error) {
selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
return "", err
}
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, d)
}
return describeDeployment(d, selector, d.DeepCopy(), events, dd)
}
func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) {
return tabbedString(func(out io.Writer) error {
w := NewPrefixWriter(out)
w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name)
@ -4143,7 +4201,7 @@ func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internal
ru := d.Spec.Strategy.RollingUpdate
w.Write(LEVEL_0, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String())
}
DescribePodTemplate(&internalDeployment.Spec.Template, w)
DescribePodTemplate(&d.Spec.Template, w)
if len(d.Status.Conditions) > 0 {
w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n")
w.Write(LEVEL_1, "----\t------\t------\n")
@ -4151,13 +4209,9 @@ func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internal
w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
}
}
oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1())
if err == nil {
if len(oldRSs) > 0 || len(newRSs) > 0 {
w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
var newRSs []*appsv1.ReplicaSet
if newRS != nil {
newRSs = append(newRSs, newRS)
}
w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs))
}
if events != nil {
@ -4182,9 +4236,10 @@ func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string {
return list
}
func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) {
options := metav1.ListOptions{LabelSelector: selector.String()}
rcPods, err := c.List(context.TODO(), options)
func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID, settings DescriberSettings) (
running, waiting, succeeded, failed int, err error) {
initialOpts := metav1.ListOptions{LabelSelector: selector.String(), Limit: settings.ChunkSize}
rcPods, err := getPodsInChunks(c, initialOpts)
if err != nil {
return
}
@ -4208,6 +4263,20 @@ func getPodStatusForController(c corev1client.PodInterface, selector labels.Sele
return
}
func getPodsInChunks(c corev1client.PodInterface, initialOpts metav1.ListOptions) (*corev1.PodList, error) {
podList := &corev1.PodList{}
err := runtimeresource.FollowContinue(&initialOpts,
func(options metav1.ListOptions) (runtime.Object, error) {
newList, err := c.List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, corev1.ResourcePods.String())
}
podList.Items = append(podList.Items, newList.Items...)
return newList, nil
})
return podList, err
}
// ConfigMapDescriber generates information about a ConfigMap
type ConfigMapDescriber struct {
clientset.Interface
@ -4234,7 +4303,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings
w.Write(LEVEL_0, "%s\n", string(v))
}
if describerSettings.ShowEvents {
events, err := d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap)
events, err := searchEvents(d.CoreV1(), configMap, describerSettings.ChunkSize)
if err != nil {
return err
}
@ -4415,7 +4484,7 @@ func (s *StorageClassDescriber) Describe(namespace, name string, describerSettin
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, sc)
events, _ = searchEvents(s.CoreV1(), sc, describerSettings.ChunkSize)
}
return describeStorageClass(sc, events)
@ -4467,7 +4536,7 @@ func (c *CSINodeDescriber) Describe(namespace, name string, describerSettings De
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = c.CoreV1().Events(namespace).Search(scheme.Scheme, csi)
events, _ = searchEvents(c.CoreV1(), csi, describerSettings.ChunkSize)
}
return describeCSINode(csi, events)
@ -4549,7 +4618,7 @@ func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describe
if err == nil {
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdbv1)
events, _ = searchEvents(p.CoreV1(), pdbv1, describerSettings.ChunkSize)
}
return describePodDisruptionBudgetV1(pdbv1, events)
}
@ -4561,7 +4630,7 @@ func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describe
if err == nil {
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdbv1beta1)
events, _ = searchEvents(p.CoreV1(), pdbv1beta1, describerSettings.ChunkSize)
}
return describePodDisruptionBudgetV1beta1(pdbv1beta1, events)
}
@ -4642,7 +4711,7 @@ func (s *PriorityClassDescriber) Describe(namespace, name string, describerSetti
var events *corev1.EventList
if describerSettings.ShowEvents {
events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, pc)
events, _ = searchEvents(s.CoreV1(), pc, describerSettings.ChunkSize)
}
return describePriorityClass(pc, events)
@ -5411,3 +5480,37 @@ func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string {
}
return r
}
// searchEvents finds events about the specified object.
// It is very similar to CoreV1.Events.Search, but supports the Limit parameter.
func searchEvents(client corev1client.EventsGetter, objOrRef runtime.Object, limit int64) (*corev1.EventList, error) {
ref, err := reference.GetReference(scheme.Scheme, objOrRef)
if err != nil {
return nil, err
}
stringRefKind := string(ref.Kind)
var refKind *string
if len(stringRefKind) > 0 {
refKind = &stringRefKind
}
stringRefUID := string(ref.UID)
var refUID *string
if len(stringRefUID) > 0 {
refUID = &stringRefUID
}
e := client.Events(ref.Namespace)
fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
initialOpts := metav1.ListOptions{FieldSelector: fieldSelector.String(), Limit: limit}
eventList := &corev1.EventList{}
err = runtimeresource.FollowContinue(&initialOpts,
func(options metav1.ListOptions) (runtime.Object, error) {
newEvents, err := e.List(context.TODO(), options)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, options, "events")
}
eventList.Items = append(eventList.Items, newEvents.Items...)
return newEvents, nil
})
return eventList, err
}
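The FollowContinue/EnhanceListError pairing above recurs for every list call in this file. A self-contained sketch of the pattern, with services chosen purely as an example (listServicesInChunks is hypothetical, not part of the commit):

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	runtimeresource "k8s.io/cli-runtime/pkg/resource"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// listServicesInChunks accumulates one page per callback: FollowContinue
// copies the server's continue token into options between calls and stops
// once the returned list carries no further token.
func listServicesInChunks(c corev1client.CoreV1Interface, namespace string, chunkSize int64) (*corev1.ServiceList, error) {
	services := &corev1.ServiceList{}
	err := runtimeresource.FollowContinue(&metav1.ListOptions{Limit: chunkSize},
		func(options metav1.ListOptions) (runtime.Object, error) {
			page, err := c.Services(namespace).List(context.TODO(), options)
			if err != nil {
				// EnhanceListError annotates the failure with paging context.
				return nil, runtimeresource.EnhanceListError(err, options, "services")
			}
			services.Items = append(services.Items, page.Items...)
			return page, nil
		})
	return services, err
}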

View File

@ -48,6 +48,7 @@ type ResourceDescriber interface {
// describer to control what is printed.
type DescriberSettings struct {
ShowEvents bool
ChunkSize int64
}
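A hypothetical direct caller of the extended settings could look like the sketch below; describeWithChunks and the namespace/name literals are illustrative, and 500 mirrors the cmdutil.DefaultChunkSize default used by the flag:

package main

import (
	"k8s.io/kubectl/pkg/describe"
)

// describeWithChunks runs any ResourceDescriber with events enabled and the
// default chunk size.
func describeWithChunks(d describe.ResourceDescriber) (string, error) {
	return d.Describe("my-namespace", "my-pod", describe.DescriberSettings{
		ShowEvents: true,
		ChunkSize:  500,
	})
}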
// ObjectDescriber is an interface for displaying arbitrary objects with extra

View File

@ -28,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
runtimeresource "k8s.io/cli-runtime/pkg/resource"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)
@ -84,7 +85,22 @@ func Revision(obj runtime.Object) (int64, error) {
// with no pods, and the second set of old replica sets includes all old replica sets. The third returned value
// is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
rsList, err := listReplicaSets(deployment, rsListFromClient(c))
rsList, err := listReplicaSets(deployment, rsListFromClient(c), nil)
if err != nil {
return nil, nil, nil, err
}
newRS := findNewReplicaSet(deployment, rsList)
oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList, newRS)
return oldRSes, allOldRSes, newRS, nil
}
// GetAllReplicaSetsInChunks is the same as GetAllReplicaSets, but accepts a chunk size argument.
// It returns the old and new replica sets targeted by the given Deployment. It gets PodList and
// ReplicaSetList from the client interface. Note that the first set of old replica sets doesn't include the ones
// with no pods, and the second set of old replica sets includes all old replica sets. The third returned value
// is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSetsInChunks(deployment *appsv1.Deployment, c appsclient.AppsV1Interface, chunkSize int64) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
rsList, err := listReplicaSets(deployment, rsListFromClient(c), &chunkSize)
if err != nil {
return nil, nil, nil, err
}
@ -95,8 +111,17 @@ func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interfa
// rsListFromClient returns an rsListFunc that wraps the given client.
func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
return func(namespace string, initialOpts metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
rsList := &appsv1.ReplicaSetList{}
err := runtimeresource.FollowContinue(&initialOpts,
func(opts metav1.ListOptions) (runtime.Object, error) {
newRs, err := c.ReplicaSets(namespace).List(context.TODO(), opts)
if err != nil {
return nil, runtimeresource.EnhanceListError(err, opts, "replicasets")
}
rsList.Items = append(rsList.Items, newRs.Items...)
return newRs, nil
})
if err != nil {
return nil, err
}
@ -115,7 +140,7 @@ type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error)
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) {
func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc, chunkSize *int64) ([]*appsv1.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
@ -124,6 +149,9 @@ func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*ap
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
if chunkSize != nil {
options.Limit = *chunkSize
}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
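A hedged usage sketch for the new chunked variant; summarizeReplicaSets is hypothetical, and the k8s.io/kubectl/pkg/util/deployment import path is assumed from this file's location:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
	deploymentutil "k8s.io/kubectl/pkg/util/deployment"
)

// summarizeReplicaSets fetches the deployment's replica sets in chunks of 500
// (kubectl's default) and reports what was found.
func summarizeReplicaSets(d *appsv1.Deployment, c appsclient.AppsV1Interface) error {
	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSetsInChunks(d, c, 500)
	if err != nil {
		return err
	}
	fmt.Printf("old (with pods): %d, old (all): %d, has new: %v\n",
		len(oldRSs), len(allOldRSs), newRS != nil)
	return nil
}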

View File

@ -45,6 +45,8 @@ run_daemonset_tests() {
# pod has field for kubectl set field manager
output_message=$(kubectl get daemonsets bind --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-set'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert daemonsets pods,events
# Rollout restart should change generation
kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
@ -210,6 +212,8 @@ run_deployment_tests() {
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert deployments replicasets,events
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}"
@ -284,6 +288,8 @@ run_deployment_tests() {
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert horizontalpodautoscalers events
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]:?}"
@ -501,6 +507,9 @@ run_stateful_set_tests() {
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert statefulsets pods,events
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '0'
@ -588,6 +597,8 @@ run_rs_tests() {
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert replicasets pods,events
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas

View File

@ -45,6 +45,8 @@ run_job_tests() {
kube::test::get_object_assert 'cronjob/pi --namespace=test-jobs' "{{$id_field}}" 'pi'
kubectl get cronjob/pi --namespace=test-jobs
kubectl describe cronjob/pi --namespace=test-jobs
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert cronjobs events "--namespace=test-jobs"
### Create a job in dry-run mode
output_message=$(kubectl create job test-job --from=cronjob/pi --dry-run=true --namespace=test-jobs -o name)
@ -65,6 +67,8 @@ run_job_tests() {
kube::test::get_object_assert 'job/test-job --namespace=test-jobs' "{{$id_field}}" 'test-job'
kubectl get job/test-job --namespace=test-jobs
kubectl describe job/test-job --namespace=test-jobs
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert jobs events "--namespace=test-jobs"
#Clean up
kubectl delete job test-job --namespace=test-jobs
kubectl delete cronjob pi --namespace=test-jobs

View File

@ -30,6 +30,8 @@ run_certificates_tests() {
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert certificatesigningrequests events
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{${id_field:?}}}{{end}}" ''

View File

@ -52,6 +52,8 @@ run_configmap_tests() {
kube::test::get_object_assert 'configmap/test-binary-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-binary-configmap'
grep -q "key1: value1" <<< "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
grep -q "binaryData" <<< "$(kubectl get configmap/test-binary-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert configmaps events "--namespace=test-configmaps"
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete configmap test-binary-configmap --namespace=test-configmaps
@ -105,6 +107,8 @@ run_pod_tests() {
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert pods events
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml "${kube_flags[@]}")
@ -257,6 +261,8 @@ run_pod_tests() {
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert poddisruptionbudgets events "--namespace=test-kubectl-describe-pod"
### Create a pod disruption budget with maxUnavailable
# Command
@ -295,6 +301,8 @@ run_pod_tests() {
# Command
kubectl create priorityclass test-priorityclass
kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" 'found:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert priorityclasses events
kubectl delete priorityclass test-priorityclass
### Create two PODs
@ -835,6 +843,8 @@ run_secrets_test() {
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
grep -q 'key1: dmFsdWUx' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert secrets "" "--namespace=test-secrets"
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
@ -955,6 +965,8 @@ run_service_accounts_tests() {
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert serviceaccounts secrets,events "--namespace=test-service-accounts"
# Clean-up
kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
# Clean up
@ -995,6 +1007,8 @@ run_service_tests() {
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert services events
### set selector
# prove role=master
@ -1218,6 +1232,8 @@ run_rc_tests() {
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert replicationcontrollers events
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
@ -1453,6 +1469,8 @@ run_namespace_tests() {
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert namespaces resourcequotas,limitranges
# Clean up
kubectl delete namespace my-namespace --wait=false
# make sure that wait properly waits for finalization
@ -1477,6 +1495,8 @@ run_namespace_tests() {
# Command
kubectl create quota test-quota --namespace=quotas
kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" 'found:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert resourcequotas "" "--namespace=quotas"
# Clean up
kubectl delete quota test-quota --namespace=quotas
kubectl delete namespace quotas
@ -1545,6 +1565,8 @@ run_nodes_tests() {
kube::test::describe_resource_events_assert nodes false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert nodes true
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert nodes pods,events
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable
@ -1587,6 +1609,8 @@ run_pod_templates_tests() {
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert podtemplates events
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist

View File

@ -274,6 +274,9 @@ run_non_native_resource_tests() {
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert customresourcedefinitions events
kube::test::describe_resource_chunk_size_assert foos events
# Delete the resource with cascading strategy background.
kubectl "${kube_flags[@]}" delete foos test --cascade=background

View File

@ -134,6 +134,10 @@ run_clusterroles_tests() {
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert clusterrolebindings
kube::test::describe_resource_chunk_size_assert clusterroles
set +o nounset
set +o errexit
}
@ -196,6 +200,10 @@ run_role_tests() {
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':apps:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert roles
kube::test::describe_resource_chunk_size_assert rolebindings
set +o nounset
set +o errexit
}

View File

@ -37,6 +37,8 @@ run_persistent_volumes_tests() {
kubectl delete pv pv0002 "${kube_flags[@]:?}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert pv "{{range.items}}{{${id_field:?}}}:{{end}}" 'pv0003:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert persistentvolumes events
kubectl delete pv pv0003 "${kube_flags[@]:?}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{${id_field:?}}}:{{end}}" ''
@ -65,6 +67,8 @@ run_persistent_volume_claims_tests() {
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert pvc "{{range.items}}{{${id_field:?}}}:{{end}}" 'myclaim-1:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert persistentvolumeclaims pods,events
kubectl delete pvc myclaim-1 "${kube_flags[@]:?}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]:?}"
@ -107,6 +111,8 @@ run_storage_class_tests() {
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{${id_field:?}}}:{{end}}" 'storage-class-name:'
kube::test::get_object_assert sc "{{range.items}}{{${id_field:?}}}:{{end}}" 'storage-class-name:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert storageclasses events
kubectl delete storageclass storage-class-name "${kube_flags[@]:?}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{${id_field:?}}}:{{end}}" ''