From 0cf3dba1f1df9555a46f2a086e48dcfe3c9ad6b2 Mon Sep 17 00:00:00 2001 From: Ross Peoples Date: Mon, 18 Oct 2021 11:05:28 -0500 Subject: [PATCH 1/9] drain node output should say drained not evicted --- .../src/k8s.io/kubectl/pkg/cmd/drain/drain.go | 11 +- .../kubectl/pkg/cmd/drain/drain_test.go | 233 ++++++++++-------- 2 files changed, 133 insertions(+), 111 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go index f14a4627a7d..319658453e2 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go @@ -297,17 +297,18 @@ func (o *DrainCmdOptions) RunDrain() error { return err } - printObj, err := o.ToPrinter("drained") - if err != nil { - return err - } - drainedNodes := sets.NewString() var fatal error for _, info := range o.nodeInfos { if err := o.deleteOrEvictPodsSimple(info); err == nil { drainedNodes.Insert(info.Name) + + printObj, err := o.ToPrinter("drained") + if err != nil { + return err + } + printObj(info.Object, o.Out) } else { if o.drainer.IgnoreErrors && len(o.nodeInfos) > 1 { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go index eacef7fd300..9a8fb6fdb93 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go @@ -546,16 +546,18 @@ func TestDrain(t *testing.T) { expectWarning string expectFatal bool expectDelete bool + expectOutputToContain string }{ { - description: "RC-managed pod", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{rcPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node"}, - expectFatal: false, - expectDelete: true, + description: "RC-managed pod", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{rcPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { description: "DS-managed pod", @@ -568,14 +570,15 @@ func TestDrain(t *testing.T) { expectDelete: false, }, { - description: "DS-managed terminated pod", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{dsTerminatedPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node"}, - expectFatal: false, - expectDelete: true, + description: "DS-managed terminated pod", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{dsTerminatedPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { description: "orphaned DS-managed pod", @@ -588,76 +591,83 @@ func TestDrain(t *testing.T) { expectDelete: false, }, { - description: "orphaned DS-managed pod with --force", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{orphanedDsPod}, - rcs: []corev1.ReplicationController{}, - args: []string{"node", "--force"}, - expectFatal: false, - expectDelete: true, - expectWarning: "WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/bar", + description: "orphaned DS-managed pod with --force", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{orphanedDsPod}, + rcs: []corev1.ReplicationController{}, + args: []string{"node", "--force"}, + expectFatal: false, + expectDelete: true, + expectWarning: "WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet 
or StatefulSet: default/bar", + expectOutputToContain: "node/node drained", }, { - description: "DS-managed pod with --ignore-daemonsets", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{dsPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node", "--ignore-daemonsets"}, - expectFatal: false, - expectDelete: false, + description: "DS-managed pod with --ignore-daemonsets", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{dsPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node", "--ignore-daemonsets"}, + expectFatal: false, + expectDelete: false, + expectOutputToContain: "node/node drained", }, { - description: "DS-managed pod with emptyDir with --ignore-daemonsets", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{dsPodWithEmptyDir}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node", "--ignore-daemonsets"}, - expectWarning: "WARNING: ignoring DaemonSet-managed Pods: default/bar", - expectFatal: false, - expectDelete: false, + description: "DS-managed pod with emptyDir with --ignore-daemonsets", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{dsPodWithEmptyDir}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node", "--ignore-daemonsets"}, + expectWarning: "WARNING: ignoring DaemonSet-managed Pods: default/bar", + expectFatal: false, + expectDelete: false, + expectOutputToContain: "node/node drained", }, { - description: "Job-managed pod with local storage", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{jobPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node", "--force", "--delete-emptydir-data=true"}, - expectFatal: false, - expectDelete: true, + description: "Job-managed pod with local storage", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{jobPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node", "--force", "--delete-emptydir-data=true"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { - description: "Ensure compatibility for --delete-local-data until fully deprecated", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{jobPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node", "--force", "--delete-local-data=true"}, - expectFatal: false, - expectDelete: true, + description: "Ensure compatibility for --delete-local-data until fully deprecated", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{jobPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node", "--force", "--delete-local-data=true"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { - description: "Job-managed terminated pod", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{terminatedJobPodWithLocalStorage}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node"}, - expectFatal: false, - expectDelete: true, + description: "Job-managed terminated pod", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{terminatedJobPodWithLocalStorage}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { - description: "RS-managed pod", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{rsPod}, - replicaSets: []appsv1.ReplicaSet{rs}, - args: []string{"node"}, - expectFatal: false, - expectDelete: true, + description: "RS-managed pod", + node: node, + expected: 
cordonedNode, + pods: []corev1.Pod{rsPod}, + replicaSets: []appsv1.ReplicaSet{rs}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { description: "naked pod", @@ -670,14 +680,15 @@ func TestDrain(t *testing.T) { expectDelete: false, }, { - description: "naked pod with --force", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{nakedPod}, - rcs: []corev1.ReplicationController{}, - args: []string{"node", "--force"}, - expectFatal: false, - expectDelete: true, + description: "naked pod with --force", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{nakedPod}, + rcs: []corev1.ReplicationController{}, + args: []string{"node", "--force"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { description: "pod with EmptyDir", @@ -689,33 +700,36 @@ func TestDrain(t *testing.T) { expectDelete: false, }, { - description: "terminated pod with emptyDir", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{emptydirTerminatedPod}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node"}, - expectFatal: false, - expectDelete: true, + description: "terminated pod with emptyDir", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{emptydirTerminatedPod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { - description: "pod with EmptyDir and --delete-emptydir-data", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{emptydirPod}, - args: []string{"node", "--force", "--delete-emptydir-data=true"}, - expectFatal: false, - expectDelete: true, + description: "pod with EmptyDir and --delete-emptydir-data", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{emptydirPod}, + args: []string{"node", "--force", "--delete-emptydir-data=true"}, + expectFatal: false, + expectDelete: true, + expectOutputToContain: "node/node drained", }, { - description: "empty node", - node: node, - expected: cordonedNode, - pods: []corev1.Pod{}, - rcs: []corev1.ReplicationController{rc}, - args: []string{"node"}, - expectFatal: false, - expectDelete: false, + description: "empty node", + node: node, + expected: cordonedNode, + pods: []corev1.Pod{}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: false, + expectOutputToContain: "node/node drained", }, { description: "fail to list pods", @@ -859,7 +873,7 @@ func TestDrain(t *testing.T) { } tf.ClientConfigVal = cmdtesting.DefaultClientConfig() - ioStreams, _, _, errBuf := genericclioptions.NewTestIOStreams() + ioStreams, _, outBuf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdDrain(tf, ioStreams) var recovered interface{} @@ -925,6 +939,13 @@ func TestDrain(t *testing.T) { t.Fatalf("%s: actual warning message did not match expected warning message.\n Expecting:\n%v\n Got:\n%v", test.description, e, a) } } + + if len(test.expectOutputToContain) > 0 { + out := outBuf.String() + if !strings.Contains(out, test.expectOutputToContain) { + t.Fatalf("%s: expected output to contain: %s\nGot:\n%s", test.description, test.expectOutputToContain, out) + } + } }) } } From c8f87a6a243ab6d2e9f3503a5d363385aefd9244 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 26 Oct 2021 09:47:16 -0400 Subject: [PATCH 2/9] retry PV create in e2e-test on API quota failure --- .../autoscaling/cluster_size_autoscaling.go | 2 +- 
test/e2e/framework/pv/pv.go | 50 ++++++++++++++----- test/e2e/framework/timeouts.go | 5 ++ .../flexvolume_mounted_volume_resize.go | 2 +- test/e2e/storage/flexvolume_online_resize.go | 2 +- test/e2e/storage/framework/volume_resource.go | 2 +- .../nfs_persistent_volume-disruptive.go | 6 +-- test/e2e/storage/persistent_volumes-gce.go | 2 +- test/e2e/storage/persistent_volumes-local.go | 8 +-- test/e2e/storage/persistent_volumes.go | 16 +++--- test/e2e/storage/ubernetes_lite_volumes.go | 3 +- test/e2e/storage/volume_metrics.go | 6 +-- .../vsphere/persistent_volumes-vsphere.go | 2 +- 13 files changed, 69 insertions(+), 37 deletions(-)

diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index 00f3daf2a5e..1ab01edd61e 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -491,7 +491,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			StorageClassName: &emptyStorageClass,
 		}

-		pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
+		pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false)
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc))

diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go
index 6366dcaa3b8..1818575f16b 100644
--- a/test/e2e/framework/pv/pv.go
+++ b/test/e2e/framework/pv/pv.go
@@ -19,8 +19,11 @@ package framework
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"

+	"k8s.io/apimachinery/pkg/util/wait"
+
 	"k8s.io/kubernetes/test/e2e/storage/utils"

 	"github.com/onsi/ginkgo"
@@ -295,17 +298,40 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
 }

 // create the PV resource. Fails test on error.
-func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
-	pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
+	var resultPV *v1.PersistentVolume
+	var lastCreateErr error
+	err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
+		resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+		if lastCreateErr != nil {
+			// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
+			// If quota failure strings are found for other platforms, they can be added to improve reliability when running
+			// many parallel test jobs in a single cloud account. This corresponds to controller-like behavior and
+			// to what we would recommend for general clients.
+			if strings.Contains(lastCreateErr.Error(), `googleapi: Error 403: Quota exceeded for quota group`) {
+				return false, nil
+			}

+			// if it was not a quota failure, fail immediately
+			return false, lastCreateErr
+		}

+		return true, nil
+	})
+	// if we have an error from creating the PV, use that instead of a timeout error
+	if lastCreateErr != nil {
+		return nil, fmt.Errorf("PV Create API error: %v", lastCreateErr)
+	}
 	if err != nil {
 		return nil, fmt.Errorf("PV Create API error: %v", err)
 	}
-	return pv, nil
+
+	return resultPV, nil
 }

 // CreatePV creates the PV resource. Fails test on error.
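// A minimal, self-contained sketch of the retry shape used in createPV above,
// assuming hypothetical createObject and isQuotaError helpers (neither is part
// of this patch); wait.PollImmediate(interval, timeout, condition) is the real
// API from k8s.io/apimachinery/pkg/util/wait:
//
//	var result *v1.PersistentVolume
//	var lastErr error
//	pollErr := wait.PollImmediate(29*time.Second, 3*time.Minute, func() (bool, error) {
//		result, lastErr = createObject() // hypothetical create call
//		if lastErr != nil {
//			if isQuotaError(lastErr) { // hypothetical quota-string check
//				return false, nil // transient: keep polling until the timeout
//			}
//			return false, lastErr // any other error aborts immediately
//		}
//		return true, nil
//	})
//	// surface the real create error rather than wait's generic timeout error
//	if lastErr != nil {
//		return nil, lastErr
//	}
//	if pollErr != nil {
//		return nil, pollErr
//	}
//	return result, nil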
-func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { - return createPV(c, pv) +func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { + return createPV(c, timeouts, pv) } // CreatePVC creates the PVC resource. Fails test on error. @@ -323,7 +349,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) // Note: in the pre-bind case the real PVC name, which is generated, is not // known until after the PVC is instantiated. This is why the pvc is created // before the pv. -func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { +func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { // make the pvc spec pvc := MakePersistentVolumeClaim(pvcConfig, ns) preBindMsg := "" @@ -344,7 +370,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf if preBind { pv.Spec.ClaimRef.Name = pvc.Name } - pv, err = createPV(c, pv) + pv, err = createPV(c, timeouts, pv) if err != nil { return nil, pvc, err } @@ -358,7 +384,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf // Note: in the pre-bind case the real PV name, which is generated, is not // known until after the PV is instantiated. This is why the pv is created // before the pvc. -func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { +func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { preBindMsg := "" if preBind { preBindMsg = " pre-bound" @@ -370,7 +396,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf pvc := MakePersistentVolumeClaim(pvcConfig, ns) // instantiate the pv - pv, err := createPV(c, pv) + pv, err := createPV(c, timeouts, pv) if err != nil { return nil, nil, err } @@ -392,7 +418,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf // sees an error returned, it needs to decide what to do about entries in the maps. // Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However, // orphaned pvs are not deleted and will remain after the suite completes. 
-func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) { +func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) { pvMap := make(PVMap, numpvs) pvcMap := make(PVCMap, numpvcs) extraPVCs := 0 @@ -405,7 +431,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf // create pvs and pvcs for i := 0; i < pvsToCreate; i++ { - pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false) + pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false) if err != nil { return pvMap, pvcMap, err } @@ -416,7 +442,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf // create extra pvs or pvcs as needed for i := 0; i < extraPVs; i++ { pv := MakePersistentVolume(pvConfig) - pv, err := createPV(c, pv) + pv, err := createPV(c, timeouts, pv) if err != nil { return pvMap, pvcMap, err } diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go index c0aafcce490..30d48f8b336 100644 --- a/test/e2e/framework/timeouts.go +++ b/test/e2e/framework/timeouts.go @@ -29,6 +29,7 @@ const ( claimBoundTimeout = 3 * time.Minute pvReclaimTimeout = 3 * time.Minute pvBoundTimeout = 3 * time.Minute + pvCreateTimeout = 3 * time.Minute pvDeleteTimeout = 3 * time.Minute pvDeleteSlowTimeout = 20 * time.Minute snapshotCreateTimeout = 5 * time.Minute @@ -67,6 +68,9 @@ type TimeoutContext struct { // PVBound is how long PVs have to become bound. PVBound time.Duration + // PVCreate is how long PVs have to be created. + PVCreate time.Duration + // PVDelete is how long PVs have to become deleted. 
PVDelete time.Duration @@ -95,6 +99,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext { ClaimBound: claimBoundTimeout, PVReclaim: pvReclaimTimeout, PVBound: pvBoundTimeout, + PVCreate: pvCreateTimeout, PVDelete: pvDeleteTimeout, PVDeleteSlow: pvDeleteSlowTimeout, SnapshotCreate: snapshotCreateTimeout, diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 2c093248d08..33ab803486b 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow] VolumeMode: pvc.Spec.VolumeMode, }) - _, err = e2epv.CreatePV(c, pv) + _, err = e2epv.CreatePV(c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv %v", err) ginkgo.By("Waiting for PVC to be in bound phase") diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index 242e1ade21d..69f6500f99d 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -140,7 +140,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa VolumeMode: pvc.Spec.VolumeMode, }) - _, err = e2epv.CreatePV(c, pv) + _, err = e2epv.CreatePV(c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv %v", err) ginkgo.By("Waiting for PVC to be in bound phase") diff --git a/test/e2e/storage/framework/volume_resource.go b/test/e2e/storage/framework/volume_resource.go index 0b9d3896a9e..66b521692ec 100644 --- a/test/e2e/storage/framework/volume_resource.go +++ b/test/e2e/storage/framework/volume_resource.go @@ -259,7 +259,7 @@ func createPVCPV( } framework.Logf("Creating PVC and PV") - pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false) + pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false) framework.ExpectNoError(err, "PVC, PV creation failed") err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc) diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index ac567beb426..35876be3c4c 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { PVSource: *pvSource1, Prebind: nil, } - pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false) + pv1, pvc1, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig1, pvcConfig, ns, false) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1)) @@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { PVSource: *pvSource2, Prebind: nil, } - pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false) + pv2, pvc2, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig2, pvcConfig, ns, false) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2)) @@ -293,7 +293,7 @@ func createGCEVolume() (*v1.PersistentVolumeSource, string) { // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed // by the test. 
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { - pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) + pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) defer func() { if err != nil { e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns) diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index d6c7ade4293..cd479b7c913 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -46,7 +46,7 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool { // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up. func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { ginkgo.By("Creating the PV and PVC") - pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound) + pv, pvc, err := e2epv.CreatePVPVC(c, t, pvConfig, pvcConfig, ns, isPrebound) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc)) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 854bf416d77..6c2d94f0661 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -463,7 +463,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { for _, localVolumes := range allLocalVolumes { for _, localVolume := range localVolumes { pvConfig := makeLocalPVConfig(config, localVolume) - localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig)) + localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) } } @@ -505,7 +505,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) pvConfig := makeLocalPVConfig(config, localVolume) - localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig)) + localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) } } @@ -637,7 +637,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } pvConfig := makeLocalPVConfig(config, localVolume) var err error - pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig)) + pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig)) framework.ExpectNoError(err) }) @@ -936,7 +936,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod pvcConfig := makeLocalPVCConfig(config, volume.localVolumeType) pvConfig := makeLocalPVConfig(config, volume) - volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, false) + volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, config.timeouts, pvConfig, pvcConfig, config.ns, false) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 
a73efc08bf1..2d08f8525db 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // contains the claim. Verify that the PV and PVC bind correctly, and // that the pod can write to the nfs volume. ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() { - pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) }) @@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() { - pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) }) @@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PVC and a pre-bound PV: test write access", func() { - pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) }) @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. ginkgo.It("create a PV and a pre-bound PVC: test write access", func() { - pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) }) @@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Note: PVs are created before claims and no pre-binding ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() { numPVs, numPVCs := 2, 4 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) @@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Note: PVs are created before claims and no pre-binding ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() { numPVs, numPVCs := 3, 3 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) @@ -251,7 +251,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Note: PVs are created before claims and no pre-binding. 
ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() { numPVs, numPVCs := 4, 2 - pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig) + pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true)) framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)) @@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.Context("when invoking the Recycle reclaim policy", func() { ginkgo.BeforeEach(func() { pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle - pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) + pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC") framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed") }) diff --git a/test/e2e/storage/ubernetes_lite_volumes.go b/test/e2e/storage/ubernetes_lite_volumes.go index 70e642946e2..c3028614164 100644 --- a/test/e2e/storage/ubernetes_lite_volumes.go +++ b/test/e2e/storage/ubernetes_lite_volumes.go @@ -19,6 +19,7 @@ package storage import ( "context" "fmt" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -110,7 +111,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) className := "" pvcConfig := e2epv.PersistentVolumeClaimConfig{StorageClassName: &className} - config.pv, config.pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) + config.pv, config.pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index fb9e06f0387..a055b4d1c61 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -598,7 +598,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only", func() { var err error - pv, err = e2epv.CreatePV(c, pv) + pv, err = e2epv.CreatePV(c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv: %v", err) waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey) validator([]map[string]int64{nil, {className: 1}, nil, nil}) @@ -616,7 +616,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc", func() { var err error - pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) + pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err, "Error creating pv pvc: %v", err) waitForPVControllerSync(metricsGrabber, boundPVKey, classKey) waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey) @@ -627,7 +627,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { func() { var err error dimensions := []string{pluginNameKey, volumeModeKey} - pv, err = e2epv.CreatePV(c, pv) + pv, err = e2epv.CreatePV(c, f.Timeouts, pv) framework.ExpectNoError(err, "Error creating pv: %v", err) waitForPVControllerSync(metricsGrabber, totalPVKey, pluginNameKey) controllerMetrics, err := metricsGrabber.GrabFromControllerManager() diff --git 
a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
index 4c1c070e5c1..8cd8eaf2dbf 100644
--- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
 			}
 		}
 		ginkgo.By("Creating the PV and PVC")
-		pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+		pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
 		framework.ExpectNoError(err)
 		framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))

From 7aa5cb40316dbeb970250ee0835fbd597fd19a20 Mon Sep 17 00:00:00 2001
From: Marc Khouzam
Date: Fri, 15 Oct 2021 15:36:09 -0400
Subject: [PATCH 3/9] Complete multiple resource names

This commit teaches the completion function to repeat resource names
when supported by the command. The logic checks whether a resource name
has already been specified by the user and does not offer it again when
repeating the completion.

For example, the get command can receive multiple pod names, so with
this commit:

kubectl get pod pod1 [tab]

completes pod names again, but does not show 'pod1' since it is already
part of the command line.

The improvement affects the following commands:
- annotate
- apply edit-last-applied
- apply view-last-applied
- autoscale
- delete
- describe
- edit
- expose
- get
- label
- patch
- rollout history
- rollout pause
- rollout restart
- rollout resume
- rollout undo
- scale
- taint

Note that "rollout status" only accepts a single resource name, unlike
the other "rollout ..." commands; this required a special completion
function that does not repeat names, just for that case. A minimal
sketch of the repeat-with-dedup behavior follows below.
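A runnable sketch of the dedup step described above. The Difference
helper mirrors the one this patch adds to k8s.io/kubectl/pkg/cmd/util;
package main, the pod names, and the printed demo are illustrative only:

package main

import "fmt"

// Difference removes any elements of subArray from fullArray and returns the result.
func Difference(fullArray []string, subArray []string) []string {
	exclude := make(map[string]bool, len(subArray))
	for _, elem := range subArray {
		exclude[elem] = true
	}
	var result []string
	for _, elem := range fullArray {
		if !exclude[elem] {
			result = append(result, elem)
		}
	}
	return result
}

func main() {
	candidates := []string{"foo", "bar", "baz"} // names the server would offer
	args := []string{"pod", "bar"}              // what the user has typed so far
	// args[0] is the resource type; everything after it has already been
	// given, so it is removed from the completions offered again.
	fmt.Println(Difference(candidates, args[1:])) // prints [foo baz]
}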
Signed-off-by: Marc Khouzam --- staging/src/k8s.io/kubectl/pkg/cmd/get/get.go | 5 +- .../kubectl/pkg/cmd/rollout/rollout_status.go | 2 +- .../k8s.io/kubectl/pkg/cmd/util/helpers.go | 15 +++ .../kubectl/pkg/cmd/util/helpers_test.go | 40 +++++++ .../src/k8s.io/kubectl/pkg/util/completion.go | 29 ++++- .../kubectl/pkg/util/completion_test.go | 101 ++++++++---------- 6 files changed, 127 insertions(+), 65 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go index b04489b7348..9cfd37da044 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go @@ -167,8 +167,11 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr var comps []string if len(args) == 0 { comps = apiresources.CompGetResourceList(f, cmd, toComplete) - } else if len(args) == 1 { + } else { comps = CompGetResource(f, cmd, args[0], toComplete) + if len(args) > 1 { + comps = cmdutil.Difference(comps, args[1:]) + } } return comps, cobra.ShellCompDirectiveNoFileComp }, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go index 11704af03a9..39e012a8d27 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go @@ -102,7 +102,7 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) Short: i18n.T("Show the status of the rollout"), Long: statusLong, Example: statusExample, - ValidArgsFunction: util.SpecifiedResourceTypeAndNameCompletionFunc(f, validArgs), + ValidArgsFunction: util.SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f, validArgs), Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, args)) cmdutil.CheckErr(o.Validate()) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go index 7b03deb29c0..bb24f61e65f 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -733,3 +733,18 @@ func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { oldGeneratorName, ) } + +// Difference removes any elements of subArray from fullArray and returns the result +func Difference(fullArray []string, subArray []string) []string { + exclude := make(map[string]bool, len(subArray)) + for _, elem := range subArray { + exclude[elem] = true + } + var result []string + for _, elem := range fullArray { + if _, found := exclude[elem]; !found { + result = append(result, elem) + } + } + return result +} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers_test.go index 55785335e6d..f268cdfa3ab 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers_test.go @@ -25,6 +25,9 @@ import ( "syscall" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -321,3 +324,40 @@ func TestDumpReaderToFile(t *testing.T) { t.Fatalf("Wrong file content %s != %s", testString, stringData) } } + +func TestDifferenceFunc(t *testing.T) { + tests := []struct { + name string + fullArray []string + subArray []string + expected []string + }{ + { + name: "remove some", + fullArray: []string{"a", "b", 
"c", "d"}, + subArray: []string{"c", "b"}, + expected: []string{"a", "d"}, + }, + { + name: "remove all", + fullArray: []string{"a", "b", "c", "d"}, + subArray: []string{"b", "d", "a", "c"}, + expected: nil, + }, + { + name: "remove none", + fullArray: []string{"a", "b", "c", "d"}, + subArray: nil, + expected: []string{"a", "b", "c", "d"}, + }, + } + + for _, tc := range tests { + result := Difference(tc.fullArray, tc.subArray) + if !cmp.Equal(tc.expected, result, cmpopts.SortSlices(func(x, y string) bool { + return x < y + })) { + t.Errorf("%s -> Expected: %v, but got: %v", tc.name, tc.expected, result) + } + } +} diff --git a/staging/src/k8s.io/kubectl/pkg/util/completion.go b/staging/src/k8s.io/kubectl/pkg/util/completion.go index 3253b75764c..3fe8f8cd95f 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/completion.go +++ b/staging/src/k8s.io/kubectl/pkg/util/completion.go @@ -34,15 +34,18 @@ func SetFactoryForCompletion(f cmdutil.Factory) { } // ResourceTypeAndNameCompletionFunc Returns a completion function that completes as a first argument -// the resource types that match the toComplete prefix, and as a second argument the resource names that match +// the resource types that match the toComplete prefix, and all following arguments as resource names that match // the toComplete prefix. func ResourceTypeAndNameCompletionFunc(f cmdutil.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var comps []string if len(args) == 0 { comps = apiresources.CompGetResourceList(f, cmd, toComplete) - } else if len(args) == 1 { + } else { comps = get.CompGetResource(f, cmd, args[0], toComplete) + if len(args) > 1 { + comps = cmdutil.Difference(comps, args[1:]) + } } return comps, cobra.ShellCompDirectiveNoFileComp } @@ -50,8 +53,19 @@ func ResourceTypeAndNameCompletionFunc(f cmdutil.Factory) func(*cobra.Command, [ // SpecifiedResourceTypeAndNameCompletionFunc Returns a completion function that completes as a first // argument the resource types that match the toComplete prefix and are limited to the allowedTypes, -// and as a second argument the specified resource names that match the toComplete prefix. +// and all following arguments as resource names that match the toComplete prefix. func SpecifiedResourceTypeAndNameCompletionFunc(f cmdutil.Factory, allowedTypes []string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { + return doSpecifiedResourceTypeAndNameComp(f, allowedTypes, true) +} + +// SpecifiedResourceTypeAndNameNoRepeatCompletionFunc Returns a completion function that completes as a first +// argument the resource types that match the toComplete prefix and are limited to the allowedTypes, and as +// a second argument a resource name that match the toComplete prefix. 
+func SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f cmdutil.Factory, allowedTypes []string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { + return doSpecifiedResourceTypeAndNameComp(f, allowedTypes, false) +} + +func doSpecifiedResourceTypeAndNameComp(f cmdutil.Factory, allowedTypes []string, repeat bool) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) { return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var comps []string if len(args) == 0 { @@ -60,8 +74,13 @@ func SpecifiedResourceTypeAndNameCompletionFunc(f cmdutil.Factory, allowedTypes comps = append(comps, comp) } } - } else if len(args) == 1 { - comps = get.CompGetResource(f, cmd, args[0], toComplete) + } else { + if repeat || len(args) == 1 { + comps = get.CompGetResource(f, cmd, args[0], toComplete) + if repeat && len(args) > 1 { + comps = cmdutil.Difference(comps, args[1:]) + } + } } return comps, cobra.ShellCompDirectiveNoFileComp } diff --git a/staging/src/k8s.io/kubectl/pkg/util/completion_test.go b/staging/src/k8s.io/kubectl/pkg/util/completion_test.go index 663b306720e..da94faa99ae 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/completion_test.go +++ b/staging/src/k8s.io/kubectl/pkg/util/completion_test.go @@ -35,115 +35,100 @@ import ( func TestResourceTypeAndNameCompletionFuncOneArg(t *testing.T) { tf, cmd := prepareCompletionTest() addPodsToFactory(tf) + compFunc := ResourceTypeAndNameCompletionFunc(tf) comps, directive := compFunc(cmd, []string{"pod"}, "b") checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp) } -func TestResourceTypeAndNameCompletionFuncTooManyArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory() - defer tf.Cleanup() +func TestResourceTypeAndNameCompletionFuncRepeating(t *testing.T) { + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := ResourceTypeAndNameCompletionFunc(tf) - comps, directive := compFunc(cmd, []string{"pod", "pod-name"}, "") - checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp) + comps, directive := compFunc(cmd, []string{"pod", "bar"}, "") + // The other pods should be completed, but not the already specified ones + checkCompletion(t, comps, []string{"foo"}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestSpecifiedResourceTypeAndNameCompletionFuncNoArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory() - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod", "service", "statefulset"}) comps, directive := compFunc(cmd, []string{}, "s") checkCompletion(t, comps, []string{"service", "statefulset"}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestSpecifiedResourceTypeAndNameCompletionFuncOneArg(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace("test") - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - pods, _, _ := cmdtesting.TestData() - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- tf.UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer, - Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)}, - } - - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod"}) comps, directive := compFunc(cmd, []string{"pod"}, "b") checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp) } -func TestSpecifiedResourceTypeAndNameCompletionFuncTooManyArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory() - defer tf.Cleanup() +func TestSpecifiedResourceTypeAndNameCompletionFuncRepeating(t *testing.T) { + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod"}) - comps, directive := compFunc(cmd, []string{"pod", "pod-name"}, "") + comps, directive := compFunc(cmd, []string{"pod", "bar"}, "") + // The other pods should be completed, but not the already specified ones + checkCompletion(t, comps, []string{"foo"}, directive, cobra.ShellCompDirectiveNoFileComp) +} + +func TestSpecifiedResourceTypeAndNameCompletionNoRepeatFuncOneArg(t *testing.T) { + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) + + compFunc := SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(tf, []string{"pod"}) + comps, directive := compFunc(cmd, []string{"pod"}, "b") + checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp) +} + +func TestSpecifiedResourceTypeAndNameCompletionNoRepeatFuncMultiArg(t *testing.T) { + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) + + compFunc := SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(tf, []string{"pod"}) + comps, directive := compFunc(cmd, []string{"pod", "bar"}, "") + // There should not be any more pods shown as this function should not repeat the completion checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestResourceNameCompletionFuncNoArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace("test") - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - pods, _, _ := cmdtesting.TestData() - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- tf.UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer, - Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)}, - } - - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := ResourceNameCompletionFunc(tf, "pod") comps, directive := compFunc(cmd, []string{}, "b") checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestResourceNameCompletionFuncTooManyArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory() - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := ResourceNameCompletionFunc(tf, "pod") comps, directive := compFunc(cmd, []string{"pod-name"}, "") checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestPodResourceNameAndContainerCompletionFuncNoArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace("test") - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - pods, _, _ := cmdtesting.TestData() - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - tf.UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer, - Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)}, - } - - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := PodResourceNameAndContainerCompletionFunc(tf) comps, directive := compFunc(cmd, []string{}, "b") checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp) } func TestPodResourceNameAndContainerCompletionFuncTooManyArgs(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace("test") - defer tf.Cleanup() + tf, cmd := prepareCompletionTest() + addPodsToFactory(tf) - streams, _, _, _ := genericclioptions.NewTestIOStreams() - cmd := get.NewCmdGet("kubectl", tf, streams) compFunc := PodResourceNameAndContainerCompletionFunc(tf) comps, directive := compFunc(cmd, []string{"pod-name", "container-name"}, "") checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp) From efa2b1526976b5f699ced4258c9491ff6b596bd6 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Wed, 27 Oct 2021 17:00:43 +0000 Subject: [PATCH 4/9] CHANGELOG: Update directory for v1.19.16 release --- CHANGELOG/CHANGELOG-1.19.md | 406 ++++++++++++++++++++++-------------- 1 file changed, 244 insertions(+), 162 deletions(-) diff --git a/CHANGELOG/CHANGELOG-1.19.md b/CHANGELOG/CHANGELOG-1.19.md index 245f4b387db..bb62dfb2048 100644 --- a/CHANGELOG/CHANGELOG-1.19.md +++ b/CHANGELOG/CHANGELOG-1.19.md @@ -1,232 +1,245 @@ -- [v1.19.15](#v11915) - - [Downloads for v1.19.15](#downloads-for-v11915) +- [v1.19.16](#v11916) + - [Downloads for v1.19.16](#downloads-for-v11916) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.19.14](#changelog-since-v11914) - - [Important Security Information](#important-security-information) - - [CVE-2021-25741: Symlink Exchange Can Allow Host Filesystem 
Access](#cve-2021-25741-symlink-exchange-can-allow-host-filesystem-access) + - [Changelog since v1.19.15](#changelog-since-v11915) - [Changes by Kind](#changes-by-kind) - [Bug or Regression](#bug-or-regression) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.19.14](#v11914) - - [Downloads for v1.19.14](#downloads-for-v11914) +- [v1.19.15](#v11915) + - [Downloads for v1.19.15](#downloads-for-v11915) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.19.13](#changelog-since-v11913) + - [Changelog since v1.19.14](#changelog-since-v11914) + - [Important Security Information](#important-security-information) + - [CVE-2021-25741: Symlink Exchange Can Allow Host Filesystem Access](#cve-2021-25741-symlink-exchange-can-allow-host-filesystem-access) - [Changes by Kind](#changes-by-kind-1) - - [Feature](#feature) + - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.19.13](#v11913) - - [Downloads for v1.19.13](#downloads-for-v11913) +- [v1.19.14](#v11914) + - [Downloads for v1.19.14](#downloads-for-v11914) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.19.12](#changelog-since-v11912) + - [Changelog since v1.19.13](#changelog-since-v11913) - [Changes by Kind](#changes-by-kind-2) - - [Feature](#feature-1) - - [Bug or Regression](#bug-or-regression-1) + - [Feature](#feature) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.19.12](#v11912) - - [Downloads for v1.19.12](#downloads-for-v11912) +- [v1.19.13](#v11913) + - [Downloads for v1.19.13](#downloads-for-v11913) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.19.11](#changelog-since-v11911) + - [Changelog since v1.19.12](#changelog-since-v11912) - [Changes by Kind](#changes-by-kind-3) - - [Feature](#feature-2) - - [Failing Test](#failing-test) + - [Feature](#feature-1) - [Bug or Regression](#bug-or-regression-2) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.19.11](#v11911) - - [Downloads for v1.19.11](#downloads-for-v11911) +- [v1.19.12](#v11912) + - [Downloads for v1.19.12](#downloads-for-v11912) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.19.10](#changelog-since-v11910) + - [Changelog since v1.19.11](#changelog-since-v11911) - [Changes by Kind](#changes-by-kind-4) - - [API Change](#api-change) - - [Feature](#feature-3) + - [Feature](#feature-2) + - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-3) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.19.10](#v11910) - - [Downloads for v1.19.10](#downloads-for-v11910) +- [v1.19.11](#v11911) + - [Downloads for 
v1.19.11](#downloads-for-v11911) - [Source Code](#source-code-5) - - [Client binaries](#client-binaries-5) - - [Server binaries](#server-binaries-5) - - [Node binaries](#node-binaries-5) - - [Changelog since v1.19.9](#changelog-since-v1199) - - [Important Security Information](#important-security-information) - - [CVE-2021-25735: Validating Admission Webhook does not observe some previous fields](#cve-2021-25735-validating-admission-webhook-does-not-observe-some-previous-fields) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) + - [Changelog since v1.19.10](#changelog-since-v11910) - [Changes by Kind](#changes-by-kind-5) - - [API Change](#api-change-1) - - [Feature](#feature-4) + - [API Change](#api-change) + - [Feature](#feature-3) - [Bug or Regression](#bug-or-regression-4) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- [v1.19.9](#v1199) - - [Downloads for v1.19.9](#downloads-for-v1199) +- [v1.19.10](#v11910) + - [Downloads for v1.19.10](#downloads-for-v11910) - [Source Code](#source-code-6) - [Client binaries](#client-binaries-6) - [Server binaries](#server-binaries-6) - [Node binaries](#node-binaries-6) - - [Changelog since v1.19.8](#changelog-since-v1198) + - [Changelog since v1.19.9](#changelog-since-v1199) + - [Important Security Information](#important-security-information-1) + - [CVE-2021-25735: Validating Admission Webhook does not observe some previous fields](#cve-2021-25735-validating-admission-webhook-does-not-observe-some-previous-fields) - [Changes by Kind](#changes-by-kind-6) - - [Failing Test](#failing-test-1) + - [API Change](#api-change-1) + - [Feature](#feature-4) - [Bug or Regression](#bug-or-regression-5) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.19.8](#v1198) - - [Downloads for v1.19.8](#downloads-for-v1198) +- [v1.19.9](#v1199) + - [Downloads for v1.19.9](#downloads-for-v1199) - [Source Code](#source-code-7) - [Client binaries](#client-binaries-7) - [Server binaries](#server-binaries-7) - [Node binaries](#node-binaries-7) - - [Changelog since v1.19.7](#changelog-since-v1197) + - [Changelog since v1.19.8](#changelog-since-v1198) - [Changes by Kind](#changes-by-kind-7) - - [API Change](#api-change-2) + - [Failing Test](#failing-test-1) - [Bug or Regression](#bug-or-regression-6) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.19.7](#v1197) - - [Downloads for v1.19.7](#downloads-for-v1197) +- [v1.19.8](#v1198) + - [Downloads for v1.19.8](#downloads-for-v1198) - [Source Code](#source-code-8) - [Client binaries](#client-binaries-8) - [Server binaries](#server-binaries-8) - [Node binaries](#node-binaries-8) - - [Changelog since v1.19.6](#changelog-since-v1196) + - [Changelog since v1.19.7](#changelog-since-v1197) - [Changes by Kind](#changes-by-kind-8) + - [API Change](#api-change-2) - [Bug or Regression](#bug-or-regression-7) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-8) - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) -- [v1.19.6](#v1196) - - [Downloads for v1.19.6](#downloads-for-v1196) +- [v1.19.7](#v1197) + - [Downloads for v1.19.7](#downloads-for-v1197) - [Source Code](#source-code-9) - [Client binaries](#client-binaries-9) - [Server binaries](#server-binaries-9) - [Node 
binaries](#node-binaries-9) - - [Changelog since v1.19.5](#changelog-since-v1195) + - [Changelog since v1.19.6](#changelog-since-v1196) - [Changes by Kind](#changes-by-kind-9) - - [Feature](#feature-5) - [Bug or Regression](#bug-or-regression-8) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-9) - [Added](#added-9) - [Changed](#changed-9) - [Removed](#removed-9) -- [v1.19.5](#v1195) - - [Downloads for v1.19.5](#downloads-for-v1195) +- [v1.19.6](#v1196) + - [Downloads for v1.19.6](#downloads-for-v1196) - [Source Code](#source-code-10) - [Client binaries](#client-binaries-10) - [Server binaries](#server-binaries-10) - [Node binaries](#node-binaries-10) - - [Changelog since v1.19.4](#changelog-since-v1194) + - [Changelog since v1.19.5](#changelog-since-v1195) - [Changes by Kind](#changes-by-kind-10) - - [Feature](#feature-6) - - [Failing Test](#failing-test-2) + - [Feature](#feature-5) - [Bug or Regression](#bug-or-regression-9) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-10) - [Added](#added-10) - [Changed](#changed-10) - [Removed](#removed-10) -- [v1.19.4](#v1194) - - [Downloads for v1.19.4](#downloads-for-v1194) +- [v1.19.5](#v1195) + - [Downloads for v1.19.5](#downloads-for-v1195) - [Source Code](#source-code-11) - [Client binaries](#client-binaries-11) - [Server binaries](#server-binaries-11) - [Node binaries](#node-binaries-11) - - [Changelog since v1.19.3](#changelog-since-v1193) + - [Changelog since v1.19.4](#changelog-since-v1194) - [Changes by Kind](#changes-by-kind-11) + - [Feature](#feature-6) + - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-10) - [Dependencies](#dependencies-11) - [Added](#added-11) - [Changed](#changed-11) - [Removed](#removed-11) -- [v1.19.3](#v1193) - - [Downloads for v1.19.3](#downloads-for-v1193) +- [v1.19.4](#v1194) + - [Downloads for v1.19.4](#downloads-for-v1194) - [Source Code](#source-code-12) - [Client binaries](#client-binaries-12) - [Server binaries](#server-binaries-12) - [Node binaries](#node-binaries-12) - - [Changelog since v1.19.2](#changelog-since-v1192) + - [Changelog since v1.19.3](#changelog-since-v1193) - [Changes by Kind](#changes-by-kind-12) - - [Feature](#feature-7) - - [Design](#design) - [Bug or Regression](#bug-or-regression-11) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-12) - [Added](#added-12) - [Changed](#changed-12) - [Removed](#removed-12) -- [v1.19.2](#v1192) - - [Downloads for v1.19.2](#downloads-for-v1192) +- [v1.19.3](#v1193) + - [Downloads for v1.19.3](#downloads-for-v1193) - [Source Code](#source-code-13) - [Client binaries](#client-binaries-13) - [Server binaries](#server-binaries-13) - [Node binaries](#node-binaries-13) - - [Changelog since v1.19.1](#changelog-since-v1191) + - [Changelog since v1.19.2](#changelog-since-v1192) - [Changes by Kind](#changes-by-kind-13) - - [API Change](#api-change-3) + - [Feature](#feature-7) + - [Design](#design) - [Bug or Regression](#bug-or-regression-12) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-13) - [Added](#added-13) - [Changed](#changed-13) - [Removed](#removed-13) -- [v1.19.1](#v1191) - - [Downloads for v1.19.1](#downloads-for-v1191) +- [v1.19.2](#v1192) + - [Downloads for v1.19.2](#downloads-for-v1192) - [Source Code](#source-code-14) - [Client binaries](#client-binaries-14) - [Server binaries](#server-binaries-14) - 
[Node binaries](#node-binaries-14) - - [Changelog since v1.19.0](#changelog-since-v1190) + - [Changelog since v1.19.1](#changelog-since-v1191) - [Changes by Kind](#changes-by-kind-14) + - [API Change](#api-change-3) - [Bug or Regression](#bug-or-regression-13) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) - [Dependencies](#dependencies-14) - [Added](#added-14) - [Changed](#changed-14) - [Removed](#removed-14) +- [v1.19.1](#v1191) + - [Downloads for v1.19.1](#downloads-for-v1191) + - [Source Code](#source-code-15) + - [Client binaries](#client-binaries-15) + - [Server binaries](#server-binaries-15) + - [Node binaries](#node-binaries-15) + - [Changelog since v1.19.0](#changelog-since-v1190) + - [Changes by Kind](#changes-by-kind-15) + - [Bug or Regression](#bug-or-regression-14) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Dependencies](#dependencies-15) + - [Added](#added-15) + - [Changed](#changed-15) + - [Removed](#removed-15) - [v1.19.0](#v1190) - [Downloads for v1.19.0](#downloads-for-v1190) - - [Source Code](#source-code-15) - - [Client Binaries](#client-binaries-15) - - [Server Binaries](#server-binaries-15) - - [Node Binaries](#node-binaries-15) + - [Source Code](#source-code-16) + - [Client Binaries](#client-binaries-16) + - [Server Binaries](#server-binaries-16) + - [Node Binaries](#node-binaries-16) - [Changelog since v1.18.0](#changelog-since-v1180) - [What’s New (Major Themes)](#whats-new-major-themes) - [Deprecation warnings](#deprecation-warnings) @@ -250,177 +263,177 @@ - [Known Issues](#known-issues) - [Urgent Upgrade Notes](#urgent-upgrade-notes) - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - - [Changes by Kind](#changes-by-kind-15) + - [Changes by Kind](#changes-by-kind-16) - [Deprecation](#deprecation) - [API Change](#api-change-4) - [Feature](#feature-8) - [Documentation](#documentation) - [Failing Test](#failing-test-3) - - [Bug or Regression](#bug-or-regression-14) + - [Bug or Regression](#bug-or-regression-15) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) - - [Dependencies](#dependencies-15) - - [Added](#added-15) - - [Changed](#changed-15) - - [Removed](#removed-15) - [Dependencies](#dependencies-16) - [Added](#added-16) - [Changed](#changed-16) - [Removed](#removed-16) -- [v1.19.0-rc.4](#v1190-rc4) - - [Downloads for v1.19.0-rc.4](#downloads-for-v1190-rc4) - - [Source Code](#source-code-16) - - [Client binaries](#client-binaries-16) - - [Server binaries](#server-binaries-16) - - [Node binaries](#node-binaries-16) - - [Changelog since v1.19.0-rc.3](#changelog-since-v1190-rc3) - - [Changes by Kind](#changes-by-kind-16) - - [Deprecation](#deprecation-1) - - [Bug or Regression](#bug-or-regression-15) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-8) - [Dependencies](#dependencies-17) - [Added](#added-17) - [Changed](#changed-17) - [Removed](#removed-17) -- [v1.19.0-rc.3](#v1190-rc3) - - [Downloads for v1.19.0-rc.3](#downloads-for-v1190-rc3) +- [v1.19.0-rc.4](#v1190-rc4) + - [Downloads for v1.19.0-rc.4](#downloads-for-v1190-rc4) - [Source Code](#source-code-17) - [Client binaries](#client-binaries-17) - [Server binaries](#server-binaries-17) - [Node binaries](#node-binaries-17) - - [Changelog since v1.19.0-rc.2](#changelog-since-v1190-rc2) + - [Changelog since v1.19.0-rc.3](#changelog-since-v1190-rc3) - [Changes by Kind](#changes-by-kind-17) - - [API Change](#api-change-5) + - [Deprecation](#deprecation-1) 
- [Bug or Regression](#bug-or-regression-16) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-8) - [Dependencies](#dependencies-18) - [Added](#added-18) - [Changed](#changed-18) - [Removed](#removed-18) -- [v1.19.0-rc.2](#v1190-rc2) - - [Downloads for v1.19.0-rc.2](#downloads-for-v1190-rc2) +- [v1.19.0-rc.3](#v1190-rc3) + - [Downloads for v1.19.0-rc.3](#downloads-for-v1190-rc3) - [Source Code](#source-code-18) - [Client binaries](#client-binaries-18) - [Server binaries](#server-binaries-18) - [Node binaries](#node-binaries-18) - - [Changelog since v1.19.0-rc.1](#changelog-since-v1190-rc1) + - [Changelog since v1.19.0-rc.2](#changelog-since-v1190-rc2) - [Changes by Kind](#changes-by-kind-18) - - [API Change](#api-change-6) - - [Feature](#feature-9) + - [API Change](#api-change-5) - [Bug or Regression](#bug-or-regression-17) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-9) - [Dependencies](#dependencies-19) - [Added](#added-19) - [Changed](#changed-19) - [Removed](#removed-19) -- [v1.19.0-rc.1](#v1190-rc1) - - [Downloads for v1.19.0-rc.1](#downloads-for-v1190-rc1) +- [v1.19.0-rc.2](#v1190-rc2) + - [Downloads for v1.19.0-rc.2](#downloads-for-v1190-rc2) - [Source Code](#source-code-19) - [Client binaries](#client-binaries-19) - [Server binaries](#server-binaries-19) - [Node binaries](#node-binaries-19) - - [Changelog since v1.19.0-rc.0](#changelog-since-v1190-rc0) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.19.0-rc.1](#changelog-since-v1190-rc1) - [Changes by Kind](#changes-by-kind-19) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-7) - - [Feature](#feature-10) - - [Failing Test](#failing-test-4) + - [API Change](#api-change-6) + - [Feature](#feature-9) - [Bug or Regression](#bug-or-regression-18) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-10) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-9) - [Dependencies](#dependencies-20) - [Added](#added-20) - [Changed](#changed-20) - [Removed](#removed-20) -- [v1.19.0-beta.2](#v1190-beta2) - - [Downloads for v1.19.0-beta.2](#downloads-for-v1190-beta2) +- [v1.19.0-rc.1](#v1190-rc1) + - [Downloads for v1.19.0-rc.1](#downloads-for-v1190-rc1) - [Source Code](#source-code-20) - [Client binaries](#client-binaries-20) - [Server binaries](#server-binaries-20) - [Node binaries](#node-binaries-20) - - [Changelog since v1.19.0-beta.1](#changelog-since-v1190-beta1) + - [Changelog since v1.19.0-rc.0](#changelog-since-v1190-rc0) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-20) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-8) - - [Feature](#feature-11) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-7) + - [Feature](#feature-10) + - [Failing Test](#failing-test-4) - [Bug or Regression](#bug-or-regression-19) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-11) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-10) - [Dependencies](#dependencies-21) - [Added](#added-21) - [Changed](#changed-21) - [Removed](#removed-21) -- [v1.19.0-beta.1](#v1190-beta1) - - [Downloads for v1.19.0-beta.1](#downloads-for-v1190-beta1) +- [v1.19.0-beta.2](#v1190-beta2) + - [Downloads for v1.19.0-beta.2](#downloads-for-v1190-beta2) - [Source Code](#source-code-21) - [Client binaries](#client-binaries-21) - 
[Server binaries](#server-binaries-21) - [Node binaries](#node-binaries-21) - - [Changelog since v1.19.0-alpha.3](#changelog-since-v1190-alpha3) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) + - [Changelog since v1.19.0-beta.1](#changelog-since-v1190-beta1) - [Changes by Kind](#changes-by-kind-21) - - [API Change](#api-change-9) - - [Feature](#feature-12) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-8) + - [Feature](#feature-11) - [Bug or Regression](#bug-or-regression-20) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-12) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-11) - [Dependencies](#dependencies-22) - [Added](#added-22) - [Changed](#changed-22) - [Removed](#removed-22) -- [v1.19.0-beta.0](#v1190-beta0) - - [Downloads for v1.19.0-beta.0](#downloads-for-v1190-beta0) +- [v1.19.0-beta.1](#v1190-beta1) + - [Downloads for v1.19.0-beta.1](#downloads-for-v1190-beta1) - [Source Code](#source-code-22) - [Client binaries](#client-binaries-22) - [Server binaries](#server-binaries-22) - [Node binaries](#node-binaries-22) - - [Changelog since v1.19.0-alpha.3](#changelog-since-v1190-alpha3-1) + - [Changelog since v1.19.0-alpha.3](#changelog-since-v1190-alpha3) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) - [Changes by Kind](#changes-by-kind-22) - - [API Change](#api-change-10) - - [Feature](#feature-13) + - [API Change](#api-change-9) + - [Feature](#feature-12) - [Bug or Regression](#bug-or-regression-21) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-13) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-12) - [Dependencies](#dependencies-23) - [Added](#added-23) - [Changed](#changed-23) - [Removed](#removed-23) +- [v1.19.0-beta.0](#v1190-beta0) + - [Downloads for v1.19.0-beta.0](#downloads-for-v1190-beta0) + - [Source Code](#source-code-23) + - [Client binaries](#client-binaries-23) + - [Server binaries](#server-binaries-23) + - [Node binaries](#node-binaries-23) + - [Changelog since v1.19.0-alpha.3](#changelog-since-v1190-alpha3-1) + - [Changes by Kind](#changes-by-kind-23) + - [API Change](#api-change-10) + - [Feature](#feature-13) + - [Bug or Regression](#bug-or-regression-22) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-13) + - [Dependencies](#dependencies-24) + - [Added](#added-24) + - [Changed](#changed-24) + - [Removed](#removed-24) - [v1.19.0-alpha.3](#v1190-alpha3) - [Downloads for v1.19.0-alpha.3](#downloads-for-v1190-alpha3) - - [Source Code](#source-code-23) - - [Client Binaries](#client-binaries-23) - - [Server Binaries](#server-binaries-23) - - [Node Binaries](#node-binaries-23) - - [Changelog since v1.19.0-alpha.2](#changelog-since-v1190-alpha2) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) - - [Changes by Kind](#changes-by-kind-23) - - [Deprecation](#deprecation-4) - - [API Change](#api-change-11) - - [Feature](#feature-14) - - [Bug or Regression](#bug-or-regression-22) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-14) -- [v1.19.0-alpha.2](#v1190-alpha2) - - [Downloads for v1.19.0-alpha.2](#downloads-for-v1190-alpha2) - [Source Code](#source-code-24) - [Client Binaries](#client-binaries-24) - [Server Binaries](#server-binaries-24) - [Node Binaries](#node-binaries-24) - - 
[Changelog since v1.19.0-alpha.1](#changelog-since-v1190-alpha1) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-4) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-4) + - [Changelog since v1.19.0-alpha.2](#changelog-since-v1190-alpha2) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) - [Changes by Kind](#changes-by-kind-24) - - [API Change](#api-change-12) - - [Feature](#feature-15) + - [Deprecation](#deprecation-4) + - [API Change](#api-change-11) + - [Feature](#feature-14) - [Bug or Regression](#bug-or-regression-23) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-15) -- [v1.19.0-alpha.1](#v1190-alpha1) - - [Downloads for v1.19.0-alpha.1](#downloads-for-v1190-alpha1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-14) +- [v1.19.0-alpha.2](#v1190-alpha2) + - [Downloads for v1.19.0-alpha.2](#downloads-for-v1190-alpha2) - [Source Code](#source-code-25) - [Client Binaries](#client-binaries-25) - [Server Binaries](#server-binaries-25) - [Node Binaries](#node-binaries-25) + - [Changelog since v1.19.0-alpha.1](#changelog-since-v1190-alpha1) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-4) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-4) + - [Changes by Kind](#changes-by-kind-25) + - [API Change](#api-change-12) + - [Feature](#feature-15) + - [Bug or Regression](#bug-or-regression-24) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-15) +- [v1.19.0-alpha.1](#v1190-alpha1) + - [Downloads for v1.19.0-alpha.1](#downloads-for-v1190-alpha1) + - [Source Code](#source-code-26) + - [Client Binaries](#client-binaries-26) + - [Server Binaries](#server-binaries-26) + - [Node Binaries](#node-binaries-26) - [Changelog since v1.19.0-alpha.0](#changelog-since-v1190-alpha0) - [Urgent Upgrade Notes](#urgent-upgrade-notes-5) - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-5) - - [Changes by Kind](#changes-by-kind-25) + - [Changes by Kind](#changes-by-kind-26) - [Deprecation](#deprecation-5) - [API Change](#api-change-13) - [Feature](#feature-16) @@ -429,6 +442,75 @@ +# v1.19.16 + + +## Downloads for v1.19.16 + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes.tar.gz) | 5e1ef2158744bea5dab6bb7e8229fe07ffd829269a79b496af820c4cd3000b265ffcc6c6164842de7e3419b3f0bb47ebef3db74724f6524a57333df8b5c84bed +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-src.tar.gz) | 59bdd7de5c70c31158d4b16f13930fbca6dd29a584e47a6a2a81f2df16cda5aed1f059d0db8907cc1cb99b4f0e8263ae26fea2f494238dc0eb3f1eb211a6b2c3 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-darwin-amd64.tar.gz) | 1c90e3e01b345f70ca94c7e89c6a10dab5ed2623b4f66674aefbcdae7a857cb240f39b9f84447e37c4dba815ce10aa648620cb268246261af1cbae5c080aeff4 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-386.tar.gz) | 967a79de8533aeae18cc5f8273b57ed75e8a40b0ed9e6472ca79fe5195d99768b9422c7361812237d5bba177939e934a08052c6a7ff29976f27ecd0691d30295 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-amd64.tar.gz) | 
9524a026af932ac9ca1895563060f7fb3b89f1387016e69a1a73cf7ce0f9baa54775b00c886557a97bae9b6dbc1b49c045da5dcea9ca2c1452c18c5c45fefd55 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-arm.tar.gz) | 5853f42c40ebbe620435bea036ac098a9ea206fd63dc13845e97a43c422fd3b47977b90fdbf76b6d13ea908210a77b5410066dc388dacccd88b47c1cb5ccef75 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-arm64.tar.gz) | eaf958630e85a2f71bd928736c65b97fb8e8bb376ab83a7e6c584e0379d51e98e355a6b94a40eaa40f641c2b10723c6b5844223a52143ec4e0690baeb6ba9151 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-ppc64le.tar.gz) | f3b7790a2343c2cc327af115172dac136a7b3977e5921a71146bf2f4626660b93844c1dab02e3f63293b281b52ba2c3d8562c0ce405f4a4611542470fd562ef9 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-linux-s390x.tar.gz) | 6e832b268b09bef05a932780c4c442a1dde37bd0ea29e318697c38a97a6b24acc13516eb8a7253f0b96dc3beeddd3b69c07813d7726a6b9955c6ff626847c933 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-windows-386.tar.gz) | ce7bde796184c28f25d0bc4bde0e453241e72f067cc84f716cf8f01e1a9e1e98af73a656e73bb1eb2792a90f953bb76d0157317dc2c76dfe0eb2dee9637d12ea +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-client-windows-amd64.tar.gz) | 95d487bc5c575b27f5121de8fadeb955a4d5baf6260fecb490bebdac5891f7a679878cbf76266496eef7f95e18fde08f445eb26cb7761165002831f0f6b5712f + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-server-linux-amd64.tar.gz) | c6007430e6e568728ec694d13d69315d2f97054f17552e04b10d628b45679382c0eaf9a4a7264b6c19e2aad18d20a6df1d3882781f88a3d746ab57e102becdae +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-server-linux-arm.tar.gz) | 0d52e1b495e752afa99a059a752225f5b7f45a4ce3bc43ed830f68fdeb7699a8825cbfded154ea82741aac81a123a63111ca4ffe558576929f10474b5097a34f +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-server-linux-arm64.tar.gz) | 07ef317adeb556649fdff27a3cd0c831536adb8b626d507d83a223b3c3edf00a6d26e52fc49f73e7a30809496e889fa2e0cf3c4b7052fe1ab9f6eb9328285d74 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-server-linux-ppc64le.tar.gz) | 49b0ce4a7feb2214a0d964f7c5c4911ebdc740375fbf211b7a7f7d53187f9ac80243a7e826ff4586d49c998e3951d737495c18f1598d0915b700747626e8f3a9 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-server-linux-s390x.tar.gz) | 18c02a42b07533d61c4cc9419d010355d46400a4bd69d8bbfb17c2c8a29df5f214c39cee84e701e5d87130b3c456cfc19d2256cc266f769b43f9d4a4b4b71959 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-linux-amd64.tar.gz) | fa3a29eae5e047ec99049a789d4793e05bdbdc9a31c0216a20cb60bc7aada2750d90bdfc6746e04d77e84390d2149a68f466aab4222e32dcf50ffb527e4c97e1 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-linux-arm.tar.gz) | ee84c6a9a1c3b3a060f9bc731230fa9797e2dc0c5f071ac747542e0f0f121d93a26b415a754d921b198abb6f8953d3830d54e78962a11bd6914f4507fa5c5630 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-linux-arm64.tar.gz) | 
07326ef4e2fee2ada9eb16852b0e62f999ed0f116782624fa336a9dbc99353080a07ccc7990effb88f82f922ef2063a19e7c85b094590bbac1d4967f38db68fe +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-linux-ppc64le.tar.gz) | 4e13492dd80cc3b147a3f0621e6a2d0568ba3aa4d902dfa1269c69c4756321c715b129e5d6914932931ba73136a2c5ceb3770c1d90a7c2a2a14c826ee92cb39a +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-linux-s390x.tar.gz) | 4448a179b09f8d1b288c06cfa0183938697f66ec26882cf76032a7d06876a9cda8cbfb83aa725314e1aa10ab2ca5ddfd5b0984d8feaf8bd5f77a1bbdd967ef33 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.19.16/kubernetes-node-windows-amd64.tar.gz) | 02ec4b58a484b1c5fb2a0b6e10d337a44df1f89b666cbd73622d5b3782467b57953a56444e18c4c886c9e6c421846d0232b8663563ebd7e70ec7887c4b243529 + +## Changelog since v1.19.15 + +## Changes by Kind + +### Bug or Regression + +- Fix detach from non-existent nodes and support dangling volumes (#104912, @gnufied) [SIG Cloud Provider and Storage] +- Revert PR #102925 which introduced unexpected scheduling behavior based on balanced resource allocation (#105526, @harisund) [SIG Scheduling] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + + + # v1.19.15 From 7c5a78bb918b53019dfe33bb1b65c901f221321c Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 26 Oct 2021 17:24:15 -0400 Subject: [PATCH 5/9] PodSecurity: clean up namespace validation messages, time bounding, and add testing --- .../admission/admission.go | 48 +++++++--- .../admission/admission_test.go | 92 ++++++++++++++++--- 2 files changed, 114 insertions(+), 26 deletions(-) diff --git a/staging/src/k8s.io/pod-security-admission/admission/admission.go b/staging/src/k8s.io/pod-security-admission/admission/admission.go index 4059b62782b..a3a698d8d92 100644 --- a/staging/src/k8s.io/pod-security-admission/admission/admission.go +++ b/staging/src/k8s.io/pod-security-admission/admission/admission.go @@ -41,8 +41,8 @@ import ( ) const ( - namespaceMaxPodsToCheck = 3000 - namespacePodCheckTimeout = 1 * time.Second + defaultNamespaceMaxPodsToCheck = 3000 + defaultNamespacePodCheckTimeout = 1 * time.Second ) // Admission implements the core admission logic for the Pod Security Admission controller.
@@ -64,6 +64,9 @@ type Admission struct { PodLister PodLister defaultPolicy api.Policy + + namespaceMaxPodsToCheck int + namespacePodCheckTimeout time.Duration } type NamespaceGetter interface { @@ -152,6 +155,8 @@ func (a *Admission) CompleteConfiguration() error { a.defaultPolicy = p } } + a.namespaceMaxPodsToCheck = defaultNamespaceMaxPodsToCheck + a.namespacePodCheckTimeout = defaultNamespacePodCheckTimeout if a.PodSpecExtractor == nil { a.PodSpecExtractor = &DefaultPodSpecExtractor{} @@ -173,6 +178,9 @@ func (a *Admission) ValidateConfiguration() error { return fmt.Errorf("default policy does not match; CompleteConfiguration() was not called before ValidateConfiguration()") } } + if a.namespaceMaxPodsToCheck == 0 || a.namespacePodCheckTimeout == 0 { + return fmt.Errorf("namespace configuration not set; CompleteConfiguration() was not called before ValidateConfiguration()") + } if a.Metrics == nil { return fmt.Errorf("Metrics recorder required") } @@ -409,14 +417,14 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli } if klog.V(5).Enabled() { - klog.InfoS("Pod Security evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName()) + klog.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName()) } response := allowedResponse() if enforce { if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Enforce, podMetadata, podSpec)); !result.Allowed { response = forbiddenResponse(fmt.Sprintf( - "Pod violates PodSecurity %q: %s", + "pod violates PodSecurity %q: %s", nsPolicy.Enforce.String(), result.ForbiddenDetail(), )) @@ -429,7 +437,7 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli // TODO: reuse previous evaluation if audit level+version is the same as enforce level+version if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Audit, podMetadata, podSpec)); !result.Allowed { auditAnnotations["audit"] = fmt.Sprintf( - "Would violate PodSecurity %q: %s", + "would violate PodSecurity %q: %s", nsPolicy.Audit.String(), result.ForbiddenDetail(), ) @@ -442,7 +450,7 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Warn, podMetadata, podSpec)); !result.Allowed { // TODO: Craft a better user-facing warning message response.Warnings = append(response.Warnings, fmt.Sprintf( - "Would violate PodSecurity %q: %s", + "would violate PodSecurity %q: %s", nsPolicy.Warn.String(), result.ForbiddenDetail(), )) @@ -464,9 +472,10 @@ type podCount struct { } func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace string, enforce api.LevelVersion) []string { - timeout := namespacePodCheckTimeout + // start with the default timeout + timeout := a.namespacePodCheckTimeout if deadline, ok := ctx.Deadline(); ok { - timeRemaining := time.Duration(0.9 * float64(time.Until(deadline))) // Leave a little time to respond. 
+ timeRemaining := time.Until(deadline) / 2 // don't take more than half the remaining time if timeout > timeRemaining { timeout = timeRemaining } @@ -477,8 +486,8 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin pods, err := a.PodLister.ListPods(ctx, namespace) if err != nil { - klog.ErrorS(err, "Failed to list pods", "namespace", namespace) - return []string{"Failed to list pods"} + klog.ErrorS(err, "failed to list pods", "namespace", namespace) + return []string{"failed to list pods while checking new PodSecurity enforce level"} } var ( @@ -487,11 +496,12 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin podWarnings []string podWarningsToCount = make(map[string]podCount) ) - if len(pods) > namespaceMaxPodsToCheck { - warnings = append(warnings, fmt.Sprintf("Large namespace: only checking the first %d of %d pods", namespaceMaxPodsToCheck, len(pods))) - pods = pods[0:namespaceMaxPodsToCheck] + totalPods := len(pods) + if len(pods) > a.namespaceMaxPodsToCheck { + pods = pods[0:a.namespaceMaxPodsToCheck] } + checkedPods := len(pods) for i, pod := range pods { // short-circuit on exempt runtimeclass if a.exemptRuntimeClass(pod.Spec.RuntimeClassName) { @@ -510,12 +520,20 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin c.podCount++ podWarningsToCount[warning] = c } - if time.Now().After(deadline) { - warnings = append(warnings, fmt.Sprintf("Timeout reached after checking %d pods", i+1)) + if err := ctx.Err(); err != nil { // deadline exceeded or context was cancelled + checkedPods = i + 1 break } } + if checkedPods < totalPods { + warnings = append(warnings, fmt.Sprintf("new PodSecurity enforce level only checked against the first %d of %d existing pods", checkedPods, totalPods)) + } + + if len(podWarnings) > 0 { + warnings = append(warnings, fmt.Sprintf("existing pods in namespace %q violate the new PodSecurity enforce level %q", namespace, enforce.String())) + } + // prepend pod names to warnings decoratePodWarnings(podWarningsToCount, podWarnings) // put warnings in a deterministic order diff --git a/staging/src/k8s.io/pod-security-admission/admission/admission_test.go b/staging/src/k8s.io/pod-security-admission/admission/admission_test.go index 90bf7605bf1..da198f8391e 100644 --- a/staging/src/k8s.io/pod-security-admission/admission/admission_test.go +++ b/staging/src/k8s.io/pod-security-admission/admission/admission_test.go @@ -21,6 +21,7 @@ import ( "reflect" "strings" "testing" + "time" "github.com/stretchr/testify/assert" @@ -174,9 +175,14 @@ func TestDefaultHasPodSpec(t *testing.T) { type testEvaluator struct { lv api.LevelVersion + + delay time.Duration } func (t *testEvaluator) EvaluatePod(lv api.LevelVersion, meta *metav1.ObjectMeta, spec *corev1.PodSpec) []policy.CheckResult { + if t.delay > 0 { + time.Sleep(t.delay) + } t.lv = lv if meta.Annotations["error"] != "" { return []policy.CheckResult{{Allowed: false, ForbiddenReason: meta.Annotations["error"]}} @@ -196,10 +202,17 @@ func (t *testNamespaceGetter) GetNamespace(ctx context.Context, name string) (*c type testPodLister struct { called bool pods []*corev1.Pod + delay time.Duration } func (t *testPodLister) ListPods(ctx context.Context, namespace string) ([]*corev1.Pod, error) { t.called = true + if t.delay > 0 { + time.Sleep(t.delay) + } + if err := ctx.Err(); err != nil { + return nil, err + } return t.pods, nil } @@ -218,6 +231,10 @@ func TestValidateNamespace(t *testing.T) { oldLabels map[string]string // list of pods 
to return pods []*corev1.Pod + // time to sleep while listing + delayList time.Duration + // time to sleep while evaluating + delayEvaluation time.Duration expectAllowed bool expectError string @@ -352,7 +369,11 @@ func TestValidateNamespace(t *testing.T) { expectAllowed: true, expectListPods: true, expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()}, - expectWarnings: []string{"noruntimeclasspod (and 2 other pods): message", "runtimeclass3pod: message, message2"}, + expectWarnings: []string{ + `existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`, + "noruntimeclasspod (and 2 other pods): message", + "runtimeclass3pod: message, message2", + }, }, { name: "update with runtimeclass exempt pods", @@ -362,11 +383,57 @@ func TestValidateNamespace(t *testing.T) { expectAllowed: true, expectListPods: true, expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()}, - expectWarnings: []string{"noruntimeclasspod (and 1 other pod): message", "runtimeclass3pod: message, message2"}, + expectWarnings: []string{ + `existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`, + "noruntimeclasspod (and 1 other pod): message", + "runtimeclass3pod: message, message2", + }, + }, + { + name: "timeout on list", + newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)}, + oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)}, + delayList: time.Second + 100*time.Millisecond, + expectAllowed: true, + expectListPods: true, + expectWarnings: []string{ + `failed to list pods while checking new PodSecurity enforce level`, + }, + }, + { + name: "timeout on evaluate", + newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)}, + oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)}, + delayEvaluation: (time.Second + 100*time.Millisecond) / 2, // leave time for two evaluations + expectAllowed: true, + expectListPods: true, + expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()}, + expectWarnings: []string{ + `new PodSecurity enforce level only checked against the first 2 of 4 existing pods`, + `existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`, + `noruntimeclasspod (and 1 other pod): message`, + }, + }, + { + name: "bound number of pods", + newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)}, + oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)}, + pods: []*corev1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "pod1", Annotations: map[string]string{"error": "message"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod2", Annotations: map[string]string{"error": "message"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod3", Annotations: map[string]string{"error": "message"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod4", Annotations: map[string]string{"error": "message"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod5", Annotations: map[string]string{"error": "message"}}}, + }, + expectAllowed: true, + expectListPods: true, + expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()}, + expectWarnings: []string{ + `new PodSecurity enforce level only checked against the first 4 of 5 existing pods`, + `existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`, + `pod1 (and 3 
other pods): message`, + }, }, - - // TODO: test for bounding evalution time with a warning - // TODO: test for bounding pod count with a warning // TODO: test for prioritizing evaluating pods from unique controllers } @@ -428,8 +495,8 @@ func TestValidateNamespace(t *testing.T) { }, } } - podLister := &testPodLister{pods: pods} - evaluator := &testEvaluator{} + podLister := &testPodLister{pods: pods, delay: tc.delayList} + evaluator := &testEvaluator{delay: tc.delayEvaluation} a := &Admission{ PodLister: podLister, Evaluator: evaluator, @@ -441,6 +508,9 @@ func TestValidateNamespace(t *testing.T) { }, Metrics: NewMockRecorder(), defaultPolicy: defaultPolicy, + + namespacePodCheckTimeout: time.Second, + namespaceMaxPodsToCheck: 4, } result := a.ValidateNamespace(context.TODO(), attrs) if result.Allowed != tc.expectAllowed { @@ -567,16 +637,16 @@ func TestValidatePodController(t *testing.T) { desc: "bad deploy creates produce correct user-visible warnings and correct auditAnnotations", newObject: &badDeploy, gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - expectAuditAnnotations: map[string]string{"audit": "Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, - expectWarnings: []string{"Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, + expectAuditAnnotations: map[string]string{"audit": "would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, + expectWarnings: []string{"would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, }, { desc: "bad spec updates don't block on enforce failures and returns correct information", newObject: &badDeploy, oldObject: &goodDeploy, gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - expectAuditAnnotations: map[string]string{"audit": "Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, - expectWarnings: []string{"Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, + expectAuditAnnotations: map[string]string{"audit": "would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, + expectWarnings: []string{"would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"}, }, } From 2a4701c2ca68ddeebd307bb6eed2320186059a39 Mon Sep 17 00:00:00 2001 From: Samuel Roth Date: Wed, 29 Sep 2021 20:13:23 -0400 Subject: [PATCH 6/9] PodSecurity webhook image --- .../k8s.io/pod-security-admission/.gitignore | 2 + .../pod-security-admission/webhook/Dockerfile | 19 +++++ .../pod-security-admission/webhook/Makefile | 49 +++++++++++++ .../pod-security-admission/webhook/README.md | 44 ++++++++++++ .../webhook/kustomization.yaml | 9 +++ .../webhook/manifests/10-namespace.yaml | 4 ++ .../webhook/manifests/20-configmap.yaml | 33 +++++++++ .../webhook/manifests/20-serviceaccount.yaml | 5 ++ .../webhook/manifests/30-clusterrole.yaml | 8 +++ .../manifests/40-clusterrolebinding.yaml | 12 ++++ .../webhook/manifests/50-deployment.yaml | 63 +++++++++++++++++ .../webhook/manifests/60-service.yaml | 15 ++++ .../70-validatingwebhookconfiguration.yaml | 70 +++++++++++++++++++ 13 files changed, 333 insertions(+) create mode 100644 staging/src/k8s.io/pod-security-admission/.gitignore create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/Dockerfile create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/Makefile create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/README.md create mode 100644 
staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/10-namespace.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/20-configmap.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/20-serviceaccount.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/30-clusterrole.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/40-clusterrolebinding.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/50-deployment.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/60-service.yaml create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml diff --git a/staging/src/k8s.io/pod-security-admission/.gitignore b/staging/src/k8s.io/pod-security-admission/.gitignore new file mode 100644 index 00000000000..f5c81b02256 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/.gitignore @@ -0,0 +1,2 @@ +# Webhook binary +pod-security-webhook \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/Dockerfile b/staging/src/k8s.io/pod-security-admission/webhook/Dockerfile new file mode 100644 index 00000000000..9ddbac4a748 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/Dockerfile @@ -0,0 +1,19 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM gcr.io/distroless/static:latest + +COPY pod-security-webhook /pod-security-webhook + +ENTRYPOINT [ "/pod-security-webhook" ] \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/Makefile b/staging/src/k8s.io/pod-security-admission/webhook/Makefile new file mode 100644 index 00000000000..41f4f14b04a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/Makefile @@ -0,0 +1,49 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: build container push clean + +ENTRYPOINT = "../cmd/webhook/webhook.go" +EXECUTABLE = "pod-security-webhook" + +IMAGE_DOCKERFILE = "Dockerfile" +REGISTRY ?= "gcr.io/k8s-staging-sig-auth" +IMAGE ?= "$(REGISTRY)/pod-security-webhook" +TAG ?= "latest" + +OS ?= linux +ARCH ?= amd64 + +# Builds the PodSecurity webhook binary. +build: + @echo Building PodSecurity webhook... 
+ @GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 \ + go build -o $(EXECUTABLE) $(ENTRYPOINT) + @echo Done! + +# Builds the PodSecurity webhook Docker image. +container: build + @echo Building PodSecurity webhook image... + @docker build \ + -f $(IMAGE_DOCKERFILE) \ + -t $(IMAGE):$(TAG) . + @echo Done! + +# Publishes the PodSecurity webhook Docker image to the configured registry. +push: + @docker push $(IMAGE):$(TAG) + +# Removes Pod Security Webhook artifacts. +clean: + rm $(EXECUTABLE) \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/README.md b/staging/src/k8s.io/pod-security-admission/webhook/README.md new file mode 100644 index 00000000000..063d65ba643 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/README.md @@ -0,0 +1,44 @@ +# Pod Security Admission Webhook + +This directory contains files for a _Validating Admission Webhook_ that checks for conformance to the Pod Security Standards. It is built with the same Go package as the [Pod Security Admission Controller](https://kubernetes.io/docs/concepts/security/pod-security-admission/). The webhook is suitable for environments where the built-in PodSecurity admission controller cannot be used. + +For more information, see the [Dynamic Admission Control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) documentation on the Kubernetes website. + +## Getting Started + +The webhook is available as a Docker image that lives within the SIG-Auth container registry. In addition to the `Dockerfile` for the webhook, this directory also contains sample Kubernetes manifests that can be used to deploy the webhook to a Kubernetes cluster. + +### Configuring the Webhook Certificate + +You will need to provide a cert-key pair to serve the webhook securely. See the [Kubernetes documentation on certificates](https://kubernetes.io/docs/tasks/administer-cluster/certificates/#cfssl) for instructions on generating these files. + +```bash +export CERT_PATH="..." +export KEY_PATH="..." + +kubectl create secret tls pod-security-webhook -n pod-security-webhook \ + --cert=$CERT_PATH \ + --key=$KEY_PATH +``` + +### Deploying the Webhook + +Apply the manifests to install the webhook in your cluster: + +```bash +kubectl apply -f manifests +``` + +Optionally, override the default configuration with [Kustomize](https://kustomize.io): + +```bash +kustomize build $OVERLAY_DIRECTORY +``` + +### Configuring the Webhook + +Similar to the Pod Security Admission Controller, the webhook requires a configuration file to determine how incoming resources are validated. For real-world deployments, we highly recommend reviewing our [documentation on selecting appropriate policy levels](https://kubernetes.io/docs/tasks/configure-pod-container/migrate-from-psp/#steps). + +## Contributing + +Please see the [contributing guidelines](../CONTRIBUTING.md) in the parent directory for general information about contributing to this project. 
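After applying the manifests, it is worth confirming that the webhook actually rejects non-conforming pods. Below is a minimal smoke test, not part of the manifests above; it assumes the standard `pod-security.kubernetes.io/enforce` namespace label used by Pod Security Admission (the same label the admission package's `api.EnforceLevelLabel` refers to), and the namespace and pod names are placeholders:

```bash
# Opt a throwaway namespace into the "restricted" level, then try to create a
# pod that clearly violates it; the webhook should deny the request.
kubectl create namespace webhook-smoke-test
kubectl label namespace webhook-smoke-test pod-security.kubernetes.io/enforce=restricted

# A privileged container violates the "restricted" profile.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: badpod
  namespace: webhook-smoke-test
spec:
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.5
    securityContext:
      privileged: true
EOF
# Expected: the apply fails with an admission error stating the pod
# violates PodSecurity "restricted:latest".
```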
diff --git a/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml b/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml new file mode 100644 index 00000000000..18189bf487f --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml @@ -0,0 +1,9 @@ +resources: +- manifests/10-namespace.yaml +- manifests/20-configmap.yaml +- manifests/20-serviceaccount.yaml +- manifests/30-clusterrole.yaml +- manifests/40-clusterrolebinding.yaml +- manifests/50-deployment.yaml +- manifests/60-service.yaml +- manifests/70-validatingwebhookconfiguration.yaml diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/10-namespace.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/10-namespace.yaml new file mode 100644 index 00000000000..5a1d492060c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/10-namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: pod-security-webhook \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-configmap.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-configmap.yaml new file mode 100644 index 00000000000..4d1c73bf6d4 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-configmap.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: pod-security-webhook + namespace: pod-security-webhook +data: + podsecurityconfiguration.yaml: | + apiVersion: pod-security.admission.config.k8s.io/v1alpha1 + kind: PodSecurityConfiguration + # Defaults applied when a mode label is not set. + # + # Level label values must be one of: + # - "privileged" (default) + # - "baseline" + # - "restricted" + # + # Version label values must be one of: + # - "latest" (default) + # - specific version like "v1.22" + defaults: + enforce: "privileged" + enforce-version: "latest" + audit: "privileged" + audit-version: "latest" + warn: "privileged" + warn-version: "latest" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClasses: [] + # Array of namespaces to exempt. 
+ namespaces: [] diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-serviceaccount.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-serviceaccount.yaml new file mode 100644 index 00000000000..3a353bc7303 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-security-webhook + namespace: pod-security-webhook \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/30-clusterrole.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/30-clusterrole.yaml new file mode 100644 index 00000000000..5d14e89013c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/30-clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pod-security-webhook +rules: + - apiGroups: [""] + resources: ["pods", "namespaces"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/40-clusterrolebinding.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/40-clusterrolebinding.yaml new file mode 100644 index 00000000000..2263986aac2 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/40-clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pod-security-webhook +subjects: + - kind: ServiceAccount + name: pod-security-webhook + namespace: pod-security-webhook +roleRef: + kind: ClusterRole + name: pod-security-webhook + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/50-deployment.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/50-deployment.yaml new file mode 100644 index 00000000000..104321e2cf2 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/50-deployment.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pod-security-webhook + namespace: pod-security-webhook + labels: + app: pod-security-webhook +spec: + selector: + matchLabels: + app: pod-security-webhook + template: + metadata: + labels: + app: pod-security-webhook + spec: + serviceAccountName: pod-security-webhook + priorityClassName: system-cluster-critical + volumes: + - name: config + configMap: + name: pod-security-webhook + - name: pki + secret: + secretName: pod-security-webhook + containers: + - name: pod-security-webhook + image: k8s.gcr.io/sig-auth/pod-security-webhook:v1.22-alpha.0 + terminationMessagePolicy: FallbackToLogsOnError + ports: + - containerPort: 8443 + args: + [ + "--config", + "/etc/config/podsecurityconfiguration.yaml", + "--tls-cert-file", + "/etc/pki/tls.crt", + "--tls-private-key-file", + "/etc/pki/tls.key", + "--secure-port", + "8443", + ] + resources: + requests: + cpu: 100m + limits: + cpu: 500m + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: "/etc/config" + readOnly: true + - name: pki + mountPath: "/etc/pki" + readOnly: true diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/60-service.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/60-service.yaml new file mode 100644 
index 00000000000..0b5f66f4035 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/60-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook + namespace: pod-security-webhook + labels: + app: pod-security-webhook +spec: + ports: + - port: 443 + targetPort: 8443 + protocol: TCP + name: https + selector: + app: pod-security-webhook \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml new file mode 100644 index 00000000000..4a1f2aca7e2 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml @@ -0,0 +1,70 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: "pod-security-webhook.kubernetes.io" +webhooks: + # Audit annotations will be prefixed with this name + - name: "pod-security-webhook.kubernetes.io" + # Fail-closed admission webhooks can present operational challenges. + # You may want to consider using a failure policy of Ignore, but should + # consider the security tradeoffs. + failurePolicy: Fail + namespaceSelector: + # Exempt the webhook itself to avoid a circular dependency. + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: ["pod-security-webhook"] + rules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: + - namespaces + - pods + - pods/ephemeralcontainers + clientConfig: + service: + namespace: "pod-security-webhook" + name: "webhook" + admissionReviewVersions: ["v1"] + sideEffects: None + timeoutSeconds: 5 + + # Audit annotations will be prefixed with this name + - name: "advisory.pod-security-webhook.kubernetes.io" + # Non-enforcing resources can safely fail-open. + failurePolicy: Ignore + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: ["pod-security-webhook"] + rules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: + - podtemplates + - replicationcontrollers + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + - apiGroups: ["batch"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: + - cronjobs + - jobs + clientConfig: + service: + namespace: "pod-security-webhook" + name: "webhook" + admissionReviewVersions: ["v1"] + sideEffects: None + timeoutSeconds: 5 From 0be8280faa1e9c8911f1e39f768a20079ccf8308 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 26 Oct 2021 21:39:03 +0000 Subject: [PATCH 7/9] PodSecurity: Set version build flags in makefile Change-Id: I719e7ce1efce9014e24903f0ad203a52a207f892 --- .../src/k8s.io/pod-security-admission/webhook/Makefile | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/pod-security-admission/webhook/Makefile b/staging/src/k8s.io/pod-security-admission/webhook/Makefile index 41f4f14b04a..f7e501fedc4 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/Makefile +++ b/staging/src/k8s.io/pod-security-admission/webhook/Makefile @@ -17,6 +17,9 @@ ENTRYPOINT = "../cmd/webhook/webhook.go" EXECUTABLE = "pod-security-webhook" +# Relative to location in staging dir +KUBE_ROOT = "../../../../.." 
+ IMAGE_DOCKERFILE = "Dockerfile" REGISTRY ?= "gcr.io/k8s-staging-sig-auth" IMAGE ?= "$(REGISTRY)/pod-security-webhook" @@ -28,8 +31,8 @@ ARCH ?= amd64 # Builds the PodSecurity webhook binary. build: @echo Building PodSecurity webhook... - @GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 \ - go build -o $(EXECUTABLE) $(ENTRYPOINT) + @LDFLAGS=`cd -P . && /usr/bin/env bash -c '. $(KUBE_ROOT)/hack/lib/version.sh && KUBE_ROOT=$(KUBE_ROOT) KUBE_GO_PACKAGE=k8s.io/kubernetes kube::version::ldflags'`; \ + GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 go build -o $(EXECUTABLE) -ldflags "$$LDFLAGS" $(ENTRYPOINT) @echo Done! # Builds the PodSecurity webhook Docker image. @@ -46,4 +49,4 @@ push: # Removes Pod Security Webhook artifacts. clean: - rm $(EXECUTABLE) \ No newline at end of file + rm $(EXECUTABLE) From a356c32797bc221aaccf5e907f67dadd1bd61bc6 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 27 Oct 2021 12:53:30 -0400 Subject: [PATCH 8/9] PodSecurity: simplify pki setup --- .../k8s.io/pod-security-admission/.gitignore | 5 ++- .../pod-security-admission/webhook/Makefile | 16 ++++++- .../pod-security-admission/webhook/README.md | 21 +++------- .../webhook/kustomization.yaml | 42 +++++++++++++++---- .../70-validatingwebhookconfiguration.yaml | 4 ++ .../webhook/manifests/kustomization.yaml | 9 ++++ 6 files changed, 70 insertions(+), 27 deletions(-) create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml diff --git a/staging/src/k8s.io/pod-security-admission/.gitignore b/staging/src/k8s.io/pod-security-admission/.gitignore index f5c81b02256..ec2ce3d4b0a 100644 --- a/staging/src/k8s.io/pod-security-admission/.gitignore +++ b/staging/src/k8s.io/pod-security-admission/.gitignore @@ -1,2 +1,5 @@ # Webhook binary -pod-security-webhook \ No newline at end of file +pod-security-webhook + +# Directory containing pki files +pki/ \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/Makefile b/staging/src/k8s.io/pod-security-admission/webhook/Makefile index f7e501fedc4..1e8587343ee 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/Makefile +++ b/staging/src/k8s.io/pod-security-admission/webhook/Makefile @@ -43,10 +43,24 @@ container: build -t $(IMAGE):$(TAG) . @echo Done! +# Creates a CA and serving certificate valid for webhook.pod-security-webhook.svc +certs: + rm -fr pki + mkdir -p pki + openssl genrsa -out pki/ca.key 2048 + openssl req -new -x509 -days 3650 -key pki/ca.key -subj "/CN=pod-security-webhook-ca-$$(date +%s)" -out pki/ca.crt + + openssl req -newkey rsa:2048 -nodes -keyout pki/tls.key -subj "/CN=webhook.pod-security-webhook.svc" -out pki/tls.csr + + echo "subjectAltName=DNS:webhook.pod-security-webhook.svc" > pki/extensions.txt + echo "extendedKeyUsage=serverAuth" >> pki/extensions.txt + openssl x509 -req -extfile pki/extensions.txt -days 730 -in pki/tls.csr -CA pki/ca.crt -CAkey pki/ca.key -CAcreateserial -out pki/tls.crt + # Publishes the PodSecurity webhook Docker image to the configured registry. push: @docker push $(IMAGE):$(TAG) # Removes Pod Security Webhook artifacts.
clean: - rm $(EXECUTABLE) + rm -f $(EXECUTABLE) + rm -fr pki diff --git a/staging/src/k8s.io/pod-security-admission/webhook/README.md b/staging/src/k8s.io/pod-security-admission/webhook/README.md index 063d65ba643..0bed2b98c83 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/README.md +++ b/staging/src/k8s.io/pod-security-admission/webhook/README.md @@ -10,30 +10,19 @@ The webhook is available as a Docker image that lives within the SIG-Auth contai ### Configuring the Webhook Certificate -You will need to provide a cert-key pair to serve the webhook securely. See the [Kubernetes documentation on certificates](https://kubernetes.io/docs/tasks/administer-cluster/certificates/#cfssl) for instructions on generating these files. - -```bash -export CERT_PATH="..." -export KEY_PATH="..." - -kubectl create secret tls pod-security-webhook -n pod-security-webhook \ - --cert=$CERT_PATH \ - --key=$KEY_PATH -``` +Run `make certs` to generate a CA and serving certificate valid for `https://webhook.pod-security-webhook.svc`. ### Deploying the Webhook Apply the manifests to install the webhook in your cluster: ```bash -kubectl apply -f manifests +kubectl apply -k . ``` -Optionally, override the default configuration with [Kustomize](https://kustomize.io): - -```bash -kustomize build $OVERLAY_DIRECTORY -``` +This applies the manifests in the `manifests` subdirectory, +creates a secret containing the serving certificate, +and injects the CA bundle to the validating webhook. ### Configuring the Webhook diff --git a/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml b/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml index 18189bf487f..ac22d13e8ab 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml +++ b/staging/src/k8s.io/pod-security-admission/webhook/kustomization.yaml @@ -1,9 +1,33 @@ -resources: -- manifests/10-namespace.yaml -- manifests/20-configmap.yaml -- manifests/20-serviceaccount.yaml -- manifests/30-clusterrole.yaml -- manifests/40-clusterrolebinding.yaml -- manifests/50-deployment.yaml -- manifests/60-service.yaml -- manifests/70-validatingwebhookconfiguration.yaml +# include the manifests +bases: +- ./manifests + +# generate the secret +# this depends on pki files, which can be created (or regenerated) with `make certs` +secretGenerator: +- name: pod-security-webhook + namespace: pod-security-webhook + type: kubernetes.io/tls + options: + disableNameSuffixHash: true + files: + - pki/ca.crt + - pki/tls.crt + - pki/tls.key + +# inject the CA into the validating webhook +replacements: +- source: + kind: Secret + name: pod-security-webhook + namespace: pod-security-webhook + fieldPath: data.ca\.crt + targets: + - select: + kind: ValidatingWebhookConfiguration + name: pod-security-webhook.kubernetes.io + fieldPaths: + - webhooks.0.clientConfig.caBundle + - webhooks.1.clientConfig.caBundle + options: + create: true diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml index 4a1f2aca7e2..c185c26c18e 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/70-validatingwebhookconfiguration.yaml @@ -24,6 +24,8 @@ webhooks: - pods - pods/ephemeralcontainers clientConfig: + # Populate with the CA for the serving certificate + caBundle: "" service: 
namespace: "pod-security-webhook" name: "webhook" @@ -62,6 +64,8 @@ webhooks: - cronjobs - jobs clientConfig: + # Populate with the CA for the serving certificate + caBundle: "" service: namespace: "pod-security-webhook" name: "webhook" diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml new file mode 100644 index 00000000000..f637494436e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml @@ -0,0 +1,9 @@ +resources: +- 10-namespace.yaml +- 20-configmap.yaml +- 20-serviceaccount.yaml +- 30-clusterrole.yaml +- 40-clusterrolebinding.yaml +- 50-deployment.yaml +- 60-service.yaml +- 70-validatingwebhookconfiguration.yaml From 09e9ba99abe1ccc4742b2be68e7b778ffb982e10 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 27 Oct 2021 13:01:41 -0400 Subject: [PATCH 9/9] PodSecurity: add resource quota for clusters that limit cluster-critical by default --- .../webhook/manifests/20-resourcequota.yaml | 14 ++++++++++++++ .../webhook/manifests/kustomization.yaml | 1 + 2 files changed, 15 insertions(+) create mode 100644 staging/src/k8s.io/pod-security-admission/webhook/manifests/20-resourcequota.yaml diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-resourcequota.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-resourcequota.yaml new file mode 100644 index 00000000000..0c90bd22bda --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/20-resourcequota.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: pod-security-webhook + namespace: pod-security-webhook +spec: + hard: + pods: 3 + scopeSelector: + matchExpressions: + - operator: In + scopeName: PriorityClass + values: + - system-cluster-critical \ No newline at end of file diff --git a/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml b/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml index f637494436e..8320af4a6d1 100644 --- a/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml +++ b/staging/src/k8s.io/pod-security-admission/webhook/manifests/kustomization.yaml @@ -2,6 +2,7 @@ resources: - 10-namespace.yaml - 20-configmap.yaml - 20-serviceaccount.yaml +- 20-resourcequota.yaml - 30-clusterrole.yaml - 40-clusterrolebinding.yaml - 50-deployment.yaml