Merge remote-tracking branch 'origin/master'

Anago GCB 2021-10-28 01:25:29 +00:00
commit e261afa0ab
38 changed files with 837 additions and 239 deletions

View File

@ -297,17 +297,18 @@ func (o *DrainCmdOptions) RunDrain() error {
return err
}
printObj, err := o.ToPrinter("drained")
if err != nil {
return err
}
drainedNodes := sets.NewString()
var fatal error
for _, info := range o.nodeInfos {
if err := o.deleteOrEvictPodsSimple(info); err == nil {
drainedNodes.Insert(info.Name)
printObj, err := o.ToPrinter("drained")
if err != nil {
return err
}
printObj(info.Object, o.Out)
} else {
if o.drainer.IgnoreErrors && len(o.nodeInfos) > 1 {

View File

@ -546,16 +546,18 @@ func TestDrain(t *testing.T) {
expectWarning string
expectFatal bool
expectDelete bool
expectOutputToContain string
}{
{
description: "RC-managed pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{rcPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
description: "RC-managed pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{rcPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "DS-managed pod",
@ -568,14 +570,15 @@ func TestDrain(t *testing.T) {
expectDelete: false,
},
{
description: "DS-managed terminated pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsTerminatedPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
description: "DS-managed terminated pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsTerminatedPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "orphaned DS-managed pod",
@ -588,76 +591,83 @@ func TestDrain(t *testing.T) {
expectDelete: false,
},
{
description: "orphaned DS-managed pod with --force",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{orphanedDsPod},
rcs: []corev1.ReplicationController{},
args: []string{"node", "--force"},
expectFatal: false,
expectDelete: true,
expectWarning: "WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/bar",
description: "orphaned DS-managed pod with --force",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{orphanedDsPod},
rcs: []corev1.ReplicationController{},
args: []string{"node", "--force"},
expectFatal: false,
expectDelete: true,
expectWarning: "WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/bar",
expectOutputToContain: "node/node drained",
},
{
description: "DS-managed pod with --ignore-daemonsets",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--ignore-daemonsets"},
expectFatal: false,
expectDelete: false,
description: "DS-managed pod with --ignore-daemonsets",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--ignore-daemonsets"},
expectFatal: false,
expectDelete: false,
expectOutputToContain: "node/node drained",
},
{
description: "DS-managed pod with emptyDir with --ignore-daemonsets",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsPodWithEmptyDir},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--ignore-daemonsets"},
expectWarning: "WARNING: ignoring DaemonSet-managed Pods: default/bar",
expectFatal: false,
expectDelete: false,
description: "DS-managed pod with emptyDir with --ignore-daemonsets",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{dsPodWithEmptyDir},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--ignore-daemonsets"},
expectWarning: "WARNING: ignoring DaemonSet-managed Pods: default/bar",
expectFatal: false,
expectDelete: false,
expectOutputToContain: "node/node drained",
},
{
description: "Job-managed pod with local storage",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{jobPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--force", "--delete-emptydir-data=true"},
expectFatal: false,
expectDelete: true,
description: "Job-managed pod with local storage",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{jobPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--force", "--delete-emptydir-data=true"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "Ensure compatibility for --delete-local-data until fully deprecated",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{jobPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--force", "--delete-local-data=true"},
expectFatal: false,
expectDelete: true,
description: "Ensure compatibility for --delete-local-data until fully deprecated",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{jobPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node", "--force", "--delete-local-data=true"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "Job-managed terminated pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{terminatedJobPodWithLocalStorage},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
description: "Job-managed terminated pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{terminatedJobPodWithLocalStorage},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "RS-managed pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{rsPod},
replicaSets: []appsv1.ReplicaSet{rs},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
description: "RS-managed pod",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{rsPod},
replicaSets: []appsv1.ReplicaSet{rs},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "naked pod",
@ -670,14 +680,15 @@ func TestDrain(t *testing.T) {
expectDelete: false,
},
{
description: "naked pod with --force",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{nakedPod},
rcs: []corev1.ReplicationController{},
args: []string{"node", "--force"},
expectFatal: false,
expectDelete: true,
description: "naked pod with --force",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{nakedPod},
rcs: []corev1.ReplicationController{},
args: []string{"node", "--force"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "pod with EmptyDir",
@ -689,33 +700,36 @@ func TestDrain(t *testing.T) {
expectDelete: false,
},
{
description: "terminated pod with emptyDir",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{emptydirTerminatedPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
description: "terminated pod with emptyDir",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{emptydirTerminatedPod},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "pod with EmptyDir and --delete-emptydir-data",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{emptydirPod},
args: []string{"node", "--force", "--delete-emptydir-data=true"},
expectFatal: false,
expectDelete: true,
description: "pod with EmptyDir and --delete-emptydir-data",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{emptydirPod},
args: []string{"node", "--force", "--delete-emptydir-data=true"},
expectFatal: false,
expectDelete: true,
expectOutputToContain: "node/node drained",
},
{
description: "empty node",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: false,
description: "empty node",
node: node,
expected: cordonedNode,
pods: []corev1.Pod{},
rcs: []corev1.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
expectDelete: false,
expectOutputToContain: "node/node drained",
},
{
description: "fail to list pods",
@ -859,7 +873,7 @@ func TestDrain(t *testing.T) {
}
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
ioStreams, _, _, errBuf := genericclioptions.NewTestIOStreams()
ioStreams, _, outBuf, errBuf := genericclioptions.NewTestIOStreams()
cmd := NewCmdDrain(tf, ioStreams)
var recovered interface{}
@ -925,6 +939,13 @@ func TestDrain(t *testing.T) {
t.Fatalf("%s: actual warning message did not match expected warning message.\n Expecting:\n%v\n Got:\n%v", test.description, e, a)
}
}
if len(test.expectOutputToContain) > 0 {
out := outBuf.String()
if !strings.Contains(out, test.expectOutputToContain) {
t.Fatalf("%s: expected output to contain: %s\nGot:\n%s", test.description, test.expectOutputToContain, out)
}
}
})
}
}

View File

@ -167,8 +167,11 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr
var comps []string
if len(args) == 0 {
comps = apiresources.CompGetResourceList(f, cmd, toComplete)
} else if len(args) == 1 {
} else {
comps = CompGetResource(f, cmd, args[0], toComplete)
if len(args) > 1 {
comps = cmdutil.Difference(comps, args[1:])
}
}
return comps, cobra.ShellCompDirectiveNoFileComp
},

View File

@ -102,7 +102,7 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams)
Short: i18n.T("Show the status of the rollout"),
Long: statusLong,
Example: statusExample,
ValidArgsFunction: util.SpecifiedResourceTypeAndNameCompletionFunc(f, validArgs),
ValidArgsFunction: util.SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f, validArgs),
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, args))
cmdutil.CheckErr(o.Validate())

View File

@ -733,3 +733,18 @@ func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) {
oldGeneratorName,
)
}
// Difference removes any elements of subArray from fullArray and returns the result
func Difference(fullArray []string, subArray []string) []string {
exclude := make(map[string]bool, len(subArray))
for _, elem := range subArray {
exclude[elem] = true
}
var result []string
for _, elem := range fullArray {
if _, found := exclude[elem]; !found {
result = append(result, elem)
}
}
return result
}
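
As context for the completion changes above, here is a minimal, hypothetical usage sketch of the new `Difference` helper; the import path is assumed to be `k8s.io/kubectl/pkg/cmd/util` (the package this file belongs to), and the slices are illustrative rather than taken from the commit.

```go
package main

import (
	"fmt"

	cmdutil "k8s.io/kubectl/pkg/cmd/util" // assumed import path for the helper shown above
)

func main() {
	// Candidate completions returned for a resource type (e.g. from CompGetResource).
	completions := []string{"foo", "bar", "baz"}
	// Resource names the user has already typed on the command line (args[1:]).
	alreadySpecified := []string{"bar"}

	// Difference drops the already-specified names, so shell completion only
	// proposes resources that have not been listed yet.
	fmt.Println(cmdutil.Difference(completions, alreadySpecified)) // [foo baz]
}
```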

View File

@ -25,6 +25,9 @@ import (
"syscall"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
@ -321,3 +324,40 @@ func TestDumpReaderToFile(t *testing.T) {
t.Fatalf("Wrong file content %s != %s", testString, stringData)
}
}
func TestDifferenceFunc(t *testing.T) {
tests := []struct {
name string
fullArray []string
subArray []string
expected []string
}{
{
name: "remove some",
fullArray: []string{"a", "b", "c", "d"},
subArray: []string{"c", "b"},
expected: []string{"a", "d"},
},
{
name: "remove all",
fullArray: []string{"a", "b", "c", "d"},
subArray: []string{"b", "d", "a", "c"},
expected: nil,
},
{
name: "remove none",
fullArray: []string{"a", "b", "c", "d"},
subArray: nil,
expected: []string{"a", "b", "c", "d"},
},
}
for _, tc := range tests {
result := Difference(tc.fullArray, tc.subArray)
if !cmp.Equal(tc.expected, result, cmpopts.SortSlices(func(x, y string) bool {
return x < y
})) {
t.Errorf("%s -> Expected: %v, but got: %v", tc.name, tc.expected, result)
}
}
}

View File

@ -34,15 +34,18 @@ func SetFactoryForCompletion(f cmdutil.Factory) {
}
// ResourceTypeAndNameCompletionFunc Returns a completion function that completes as a first argument
// the resource types that match the toComplete prefix, and as a second argument the resource names that match
// the resource types that match the toComplete prefix, and all following arguments as resource names that match
// the toComplete prefix.
func ResourceTypeAndNameCompletionFunc(f cmdutil.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var comps []string
if len(args) == 0 {
comps = apiresources.CompGetResourceList(f, cmd, toComplete)
} else if len(args) == 1 {
} else {
comps = get.CompGetResource(f, cmd, args[0], toComplete)
if len(args) > 1 {
comps = cmdutil.Difference(comps, args[1:])
}
}
return comps, cobra.ShellCompDirectiveNoFileComp
}
@ -50,8 +53,19 @@ func ResourceTypeAndNameCompletionFunc(f cmdutil.Factory) func(*cobra.Command, [
// SpecifiedResourceTypeAndNameCompletionFunc Returns a completion function that completes as a first
// argument the resource types that match the toComplete prefix and are limited to the allowedTypes,
// and as a second argument the specified resource names that match the toComplete prefix.
// and all following arguments as resource names that match the toComplete prefix.
func SpecifiedResourceTypeAndNameCompletionFunc(f cmdutil.Factory, allowedTypes []string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
return doSpecifiedResourceTypeAndNameComp(f, allowedTypes, true)
}
// SpecifiedResourceTypeAndNameNoRepeatCompletionFunc Returns a completion function that completes as a first
// argument the resource types that match the toComplete prefix and are limited to the allowedTypes, and as
// a second argument a resource name that matches the toComplete prefix.
func SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f cmdutil.Factory, allowedTypes []string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
return doSpecifiedResourceTypeAndNameComp(f, allowedTypes, false)
}
func doSpecifiedResourceTypeAndNameComp(f cmdutil.Factory, allowedTypes []string, repeat bool) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var comps []string
if len(args) == 0 {
@ -60,8 +74,13 @@ func SpecifiedResourceTypeAndNameCompletionFunc(f cmdutil.Factory, allowedTypes
comps = append(comps, comp)
}
}
} else if len(args) == 1 {
comps = get.CompGetResource(f, cmd, args[0], toComplete)
} else {
if repeat || len(args) == 1 {
comps = get.CompGetResource(f, cmd, args[0], toComplete)
if repeat && len(args) > 1 {
comps = cmdutil.Difference(comps, args[1:])
}
}
}
return comps, cobra.ShellCompDirectiveNoFileComp
}

View File

@ -35,115 +35,100 @@ import (
func TestResourceTypeAndNameCompletionFuncOneArg(t *testing.T) {
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
compFunc := ResourceTypeAndNameCompletionFunc(tf)
comps, directive := compFunc(cmd, []string{"pod"}, "b")
checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestResourceTypeAndNameCompletionFuncTooManyArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
func TestResourceTypeAndNameCompletionFuncRepeating(t *testing.T) {
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := ResourceTypeAndNameCompletionFunc(tf)
comps, directive := compFunc(cmd, []string{"pod", "pod-name"}, "")
checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp)
comps, directive := compFunc(cmd, []string{"pod", "bar"}, "")
// The other pods should be completed, but not the already specified ones
checkCompletion(t, comps, []string{"foo"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestSpecifiedResourceTypeAndNameCompletionFuncNoArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod", "service", "statefulset"})
comps, directive := compFunc(cmd, []string{}, "s")
checkCompletion(t, comps, []string{"service", "statefulset"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestSpecifiedResourceTypeAndNameCompletionFuncOneArg(t *testing.T) {
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
pods, _, _ := cmdtesting.TestData()
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
tf.UnstructuredClient = &fake.RESTClient{
NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)},
}
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod"})
comps, directive := compFunc(cmd, []string{"pod"}, "b")
checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestSpecifiedResourceTypeAndNameCompletionFuncTooManyArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
func TestSpecifiedResourceTypeAndNameCompletionFuncRepeating(t *testing.T) {
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := SpecifiedResourceTypeAndNameCompletionFunc(tf, []string{"pod"})
comps, directive := compFunc(cmd, []string{"pod", "pod-name"}, "")
comps, directive := compFunc(cmd, []string{"pod", "bar"}, "")
// The other pods should be completed, but not the already specified ones
checkCompletion(t, comps, []string{"foo"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestSpecifiedResourceTypeAndNameCompletionNoRepeatFuncOneArg(t *testing.T) {
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
compFunc := SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(tf, []string{"pod"})
comps, directive := compFunc(cmd, []string{"pod"}, "b")
checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestSpecifiedResourceTypeAndNameCompletionNoRepeatFuncMultiArg(t *testing.T) {
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
compFunc := SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(tf, []string{"pod"})
comps, directive := compFunc(cmd, []string{"pod", "bar"}, "")
// There should not be any more pods shown as this function should not repeat the completion
checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestResourceNameCompletionFuncNoArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
pods, _, _ := cmdtesting.TestData()
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
tf.UnstructuredClient = &fake.RESTClient{
NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)},
}
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := ResourceNameCompletionFunc(tf, "pod")
comps, directive := compFunc(cmd, []string{}, "b")
checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestResourceNameCompletionFuncTooManyArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory()
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := ResourceNameCompletionFunc(tf, "pod")
comps, directive := compFunc(cmd, []string{"pod-name"}, "")
checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestPodResourceNameAndContainerCompletionFuncNoArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
pods, _, _ := cmdtesting.TestData()
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
tf.UnstructuredClient = &fake.RESTClient{
NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
Resp: &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, pods)},
}
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := PodResourceNameAndContainerCompletionFunc(tf)
comps, directive := compFunc(cmd, []string{}, "b")
checkCompletion(t, comps, []string{"bar"}, directive, cobra.ShellCompDirectiveNoFileComp)
}
func TestPodResourceNameAndContainerCompletionFuncTooManyArgs(t *testing.T) {
tf := cmdtesting.NewTestFactory().WithNamespace("test")
defer tf.Cleanup()
tf, cmd := prepareCompletionTest()
addPodsToFactory(tf)
streams, _, _, _ := genericclioptions.NewTestIOStreams()
cmd := get.NewCmdGet("kubectl", tf, streams)
compFunc := PodResourceNameAndContainerCompletionFunc(tf)
comps, directive := compFunc(cmd, []string{"pod-name", "container-name"}, "")
checkCompletion(t, comps, []string{}, directive, cobra.ShellCompDirectiveNoFileComp)

View File

@ -0,0 +1,5 @@
# Webhook binary
pod-security-webhook
# Directory containing pki files
pki/

View File

@ -41,8 +41,8 @@ import (
)
const (
namespaceMaxPodsToCheck = 3000
namespacePodCheckTimeout = 1 * time.Second
defaultNamespaceMaxPodsToCheck = 3000
defaultNamespacePodCheckTimeout = 1 * time.Second
)
// Admission implements the core admission logic for the Pod Security Admission controller.
@ -64,6 +64,9 @@ type Admission struct {
PodLister PodLister
defaultPolicy api.Policy
namespaceMaxPodsToCheck int
namespacePodCheckTimeout time.Duration
}
type NamespaceGetter interface {
@ -152,6 +155,8 @@ func (a *Admission) CompleteConfiguration() error {
a.defaultPolicy = p
}
}
a.namespaceMaxPodsToCheck = defaultNamespaceMaxPodsToCheck
a.namespacePodCheckTimeout = defaultNamespacePodCheckTimeout
if a.PodSpecExtractor == nil {
a.PodSpecExtractor = &DefaultPodSpecExtractor{}
@ -173,6 +178,9 @@ func (a *Admission) ValidateConfiguration() error {
return fmt.Errorf("default policy does not match; CompleteConfiguration() was not called before ValidateConfiguration()")
}
}
if a.namespaceMaxPodsToCheck == 0 || a.namespacePodCheckTimeout == 0 {
return fmt.Errorf("namespace configuration not set; CompleteConfiguration() was not called before ValidateConfiguration()")
}
if a.Metrics == nil {
return fmt.Errorf("Metrics recorder required")
}
@ -409,14 +417,14 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli
}
if klog.V(5).Enabled() {
klog.InfoS("Pod Security evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
klog.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
}
response := allowedResponse()
if enforce {
if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Enforce, podMetadata, podSpec)); !result.Allowed {
response = forbiddenResponse(fmt.Sprintf(
"Pod violates PodSecurity %q: %s",
"pod violates PodSecurity %q: %s",
nsPolicy.Enforce.String(),
result.ForbiddenDetail(),
))
@ -429,7 +437,7 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli
// TODO: reuse previous evaluation if audit level+version is the same as enforce level+version
if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Audit, podMetadata, podSpec)); !result.Allowed {
auditAnnotations["audit"] = fmt.Sprintf(
"Would violate PodSecurity %q: %s",
"would violate PodSecurity %q: %s",
nsPolicy.Audit.String(),
result.ForbiddenDetail(),
)
@ -442,7 +450,7 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli
if result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Warn, podMetadata, podSpec)); !result.Allowed {
// TODO: Craft a better user-facing warning message
response.Warnings = append(response.Warnings, fmt.Sprintf(
"Would violate PodSecurity %q: %s",
"would violate PodSecurity %q: %s",
nsPolicy.Warn.String(),
result.ForbiddenDetail(),
))
@ -464,9 +472,10 @@ type podCount struct {
}
func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace string, enforce api.LevelVersion) []string {
timeout := namespacePodCheckTimeout
// start with the default timeout
timeout := a.namespacePodCheckTimeout
if deadline, ok := ctx.Deadline(); ok {
timeRemaining := time.Duration(0.9 * float64(time.Until(deadline))) // Leave a little time to respond.
timeRemaining := time.Until(deadline) / 2 // don't take more than half the remaining time
if timeout > timeRemaining {
timeout = timeRemaining
}
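
For reference, the deadline handling in the hunk above (start from the configured default, but never take more than half the time left on the request context) can be sketched on its own; the helper name below is illustrative and not part of the admission package.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// boundedTimeout mirrors the pattern above: start from a configured default and
// never take more than half of whatever deadline remains on the caller's context.
func boundedTimeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
	timeout := defaultTimeout
	if deadline, ok := ctx.Deadline(); ok {
		remaining := time.Until(deadline) / 2 // leave the other half for building the response
		if timeout > remaining {
			timeout = remaining
		}
	}
	return timeout
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	// With a 1s default but only ~500ms left on the context, the pod check
	// would be given roughly 250ms before reporting partial results.
	fmt.Println(boundedTimeout(ctx, time.Second))
}
```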
@ -477,8 +486,8 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin
pods, err := a.PodLister.ListPods(ctx, namespace)
if err != nil {
klog.ErrorS(err, "Failed to list pods", "namespace", namespace)
return []string{"Failed to list pods"}
klog.ErrorS(err, "failed to list pods", "namespace", namespace)
return []string{"failed to list pods while checking new PodSecurity enforce level"}
}
var (
@ -487,11 +496,12 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin
podWarnings []string
podWarningsToCount = make(map[string]podCount)
)
if len(pods) > namespaceMaxPodsToCheck {
warnings = append(warnings, fmt.Sprintf("Large namespace: only checking the first %d of %d pods", namespaceMaxPodsToCheck, len(pods)))
pods = pods[0:namespaceMaxPodsToCheck]
totalPods := len(pods)
if len(pods) > a.namespaceMaxPodsToCheck {
pods = pods[0:a.namespaceMaxPodsToCheck]
}
checkedPods := len(pods)
for i, pod := range pods {
// short-circuit on exempt runtimeclass
if a.exemptRuntimeClass(pod.Spec.RuntimeClassName) {
@ -510,12 +520,20 @@ func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace strin
c.podCount++
podWarningsToCount[warning] = c
}
if time.Now().After(deadline) {
warnings = append(warnings, fmt.Sprintf("Timeout reached after checking %d pods", i+1))
if err := ctx.Err(); err != nil { // deadline exceeded or context was cancelled
checkedPods = i + 1
break
}
}
if checkedPods < totalPods {
warnings = append(warnings, fmt.Sprintf("new PodSecurity enforce level only checked against the first %d of %d existing pods", checkedPods, totalPods))
}
if len(podWarnings) > 0 {
warnings = append(warnings, fmt.Sprintf("existing pods in namespace %q violate the new PodSecurity enforce level %q", namespace, enforce.String()))
}
// prepend pod names to warnings
decoratePodWarnings(podWarningsToCount, podWarnings)
// put warnings in a deterministic order

View File

@ -21,6 +21,7 @@ import (
"reflect"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
@ -174,9 +175,14 @@ func TestDefaultHasPodSpec(t *testing.T) {
type testEvaluator struct {
lv api.LevelVersion
delay time.Duration
}
func (t *testEvaluator) EvaluatePod(lv api.LevelVersion, meta *metav1.ObjectMeta, spec *corev1.PodSpec) []policy.CheckResult {
if t.delay > 0 {
time.Sleep(t.delay)
}
t.lv = lv
if meta.Annotations["error"] != "" {
return []policy.CheckResult{{Allowed: false, ForbiddenReason: meta.Annotations["error"]}}
@ -196,10 +202,17 @@ func (t *testNamespaceGetter) GetNamespace(ctx context.Context, name string) (*c
type testPodLister struct {
called bool
pods []*corev1.Pod
delay time.Duration
}
func (t *testPodLister) ListPods(ctx context.Context, namespace string) ([]*corev1.Pod, error) {
t.called = true
if t.delay > 0 {
time.Sleep(t.delay)
}
if err := ctx.Err(); err != nil {
return nil, err
}
return t.pods, nil
}
@ -218,6 +231,10 @@ func TestValidateNamespace(t *testing.T) {
oldLabels map[string]string
// list of pods to return
pods []*corev1.Pod
// time to sleep while listing
delayList time.Duration
// time to sleep while evaluating
delayEvaluation time.Duration
expectAllowed bool
expectError string
@ -352,7 +369,11 @@ func TestValidateNamespace(t *testing.T) {
expectAllowed: true,
expectListPods: true,
expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()},
expectWarnings: []string{"noruntimeclasspod (and 2 other pods): message", "runtimeclass3pod: message, message2"},
expectWarnings: []string{
`existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`,
"noruntimeclasspod (and 2 other pods): message",
"runtimeclass3pod: message, message2",
},
},
{
name: "update with runtimeclass exempt pods",
@ -362,11 +383,57 @@ func TestValidateNamespace(t *testing.T) {
expectAllowed: true,
expectListPods: true,
expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()},
expectWarnings: []string{"noruntimeclasspod (and 1 other pod): message", "runtimeclass3pod: message, message2"},
expectWarnings: []string{
`existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`,
"noruntimeclasspod (and 1 other pod): message",
"runtimeclass3pod: message, message2",
},
},
{
name: "timeout on list",
newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)},
oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)},
delayList: time.Second + 100*time.Millisecond,
expectAllowed: true,
expectListPods: true,
expectWarnings: []string{
`failed to list pods while checking new PodSecurity enforce level`,
},
},
{
name: "timeout on evaluate",
newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)},
oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)},
delayEvaluation: (time.Second + 100*time.Millisecond) / 2, // leave time for two evaluations
expectAllowed: true,
expectListPods: true,
expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()},
expectWarnings: []string{
`new PodSecurity enforce level only checked against the first 2 of 4 existing pods`,
`existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`,
`noruntimeclasspod (and 1 other pod): message`,
},
},
{
name: "bound number of pods",
newLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelRestricted)},
oldLabels: map[string]string{api.EnforceLevelLabel: string(api.LevelBaseline)},
pods: []*corev1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Annotations: map[string]string{"error": "message"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod2", Annotations: map[string]string{"error": "message"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod3", Annotations: map[string]string{"error": "message"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod4", Annotations: map[string]string{"error": "message"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod5", Annotations: map[string]string{"error": "message"}}},
},
expectAllowed: true,
expectListPods: true,
expectEvaluate: api.LevelVersion{Level: api.LevelRestricted, Version: api.LatestVersion()},
expectWarnings: []string{
`new PodSecurity enforce level only checked against the first 4 of 5 existing pods`,
`existing pods in namespace "test" violate the new PodSecurity enforce level "restricted:latest"`,
`pod1 (and 3 other pods): message`,
},
},
// TODO: test for bounding evalution time with a warning
// TODO: test for bounding pod count with a warning
// TODO: test for prioritizing evaluating pods from unique controllers
}
@ -428,8 +495,8 @@ func TestValidateNamespace(t *testing.T) {
},
}
}
podLister := &testPodLister{pods: pods}
evaluator := &testEvaluator{}
podLister := &testPodLister{pods: pods, delay: tc.delayList}
evaluator := &testEvaluator{delay: tc.delayEvaluation}
a := &Admission{
PodLister: podLister,
Evaluator: evaluator,
@ -441,6 +508,9 @@ func TestValidateNamespace(t *testing.T) {
},
Metrics: NewMockRecorder(),
defaultPolicy: defaultPolicy,
namespacePodCheckTimeout: time.Second,
namespaceMaxPodsToCheck: 4,
}
result := a.ValidateNamespace(context.TODO(), attrs)
if result.Allowed != tc.expectAllowed {
@ -567,16 +637,16 @@ func TestValidatePodController(t *testing.T) {
desc: "bad deploy creates produce correct user-visible warnings and correct auditAnnotations",
newObject: &badDeploy,
gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
expectAuditAnnotations: map[string]string{"audit": "Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectWarnings: []string{"Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectAuditAnnotations: map[string]string{"audit": "would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectWarnings: []string{"would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
},
{
desc: "bad spec updates don't block on enforce failures and returns correct information",
newObject: &badDeploy,
oldObject: &goodDeploy,
gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
expectAuditAnnotations: map[string]string{"audit": "Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectWarnings: []string{"Would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectAuditAnnotations: map[string]string{"audit": "would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
expectWarnings: []string{"would violate PodSecurity \"baseline:latest\": forbidden sysctls (unknown)"},
},
}

View File

@ -0,0 +1,19 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/distroless/static:latest
COPY pod-security-webhook /pod-security-webhook
ENTRYPOINT [ "/pod-security-webhook" ]

View File

@ -0,0 +1,66 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build container push clean
ENTRYPOINT = "../cmd/webhook/webhook.go"
EXECUTABLE = "pod-security-webhook"
# Relative to location in staging dir
KUBE_ROOT = "../../../../.."
IMAGE_DOCKERFILE = "Dockerfile"
REGISTRY ?= "gcr.io/k8s-staging-sig-auth"
IMAGE ?= "$(REGISTRY)/pod-security-webhook"
TAG ?= "latest"
OS ?= linux
ARCH ?= amd64
# Builds the PodSecurity webhook binary.
build:
@echo Building PodSecurity webhook...
@LDFLAGS=`cd -P . && /usr/bin/env bash -c '. $(KUBE_ROOT)/hack/lib/version.sh && KUBE_ROOT=$(KUBE_ROOT) KUBE_GO_PACKAGE=k8s.io/kubernetes kube::version::ldflags'`; \
GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 go build -o $(EXECUTABLE) -ldflags "$$LDFLAGS" $(ENTRYPOINT)
@echo Done!
# Builds the PodSecurity webhook Docker image.
container: build
@echo Building PodSecurity webhook image...
@docker build \
-f $(IMAGE_DOCKERFILE) \
-t $(IMAGE):$(TAG) .
@echo Done!
# Creates a CA and serving certificate valid for webhook.pod-security-webhook.svc
certs:
rm -fr pki
mkdir -p pki
openssl genrsa -out pki/ca.key 2048
openssl req -new -x509 -days 3650 -key pki/ca.key -subj "/CN=pod-security-webhook-ca-$$(date +%s)" -out pki/ca.crt
openssl req -newkey rsa:2048 -nodes -keyout pki/tls.key -subj "/CN=webhook.pod-security-webhook.svc" -out pki/tls.csr
echo "subjectAltName=DNS:webhook.pod-security-webhook.svc" > pki/extensions.txt
echo "extendedKeyUsage=serverAuth" >> pki/extensions.txt
openssl x509 -req -extfile pki/extensions.txt -days 730 -in pki/tls.csr -CA pki/ca.crt -CAkey pki/ca.key -CAcreateserial -out pki/tls.crt
# Publishes the PodSecurity webhook Docker image to the configured registry.
push:
@docker push $(IMAGE):$(TAG)
# Removes Pod Security Webhook artifacts.
clean:
rm -f $(EXECUTABLE)
rm -fr pki

View File

@ -0,0 +1,33 @@
# Pod Security Admission Webhook
This directory contains files for a _Validating Admission Webhook_ that checks for conformance to the Pod Security Standards. It is built with the same Go package as the [Pod Security Admission Controller](https://kubernetes.io/docs/concepts/security/pod-security-admission/). The webhook is suitable for environments where the built-in PodSecurity admission controller cannot be used.
For more information, see the [Dynamic Admission Control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) documentation on the Kubernetes website.
## Getting Started
The webhook is available as a Docker image that lives within the SIG-Auth container registry. In addition to the `Dockerfile` for the webhook, this directory also contains sample Kubernetes manifests that can be used to deploy the webhook to a Kubernetes cluster.
### Configuring the Webhook Certificate
Run `make certs` to generate a CA and serving certificate valid for `https://webhook.pod-security-webhook.svc`.
### Deploying the Webhook
Apply the manifests to install the webhook in your cluster:
```bash
kubectl apply -k .
```
This applies the manifests in the `manifests` subdirectory,
creates a secret containing the serving certificate,
and injects the CA bundle into the validating webhook.
### Configuring the Webhook
Similar to the Pod Security Admission Controller, the webhook requires a configuration file to determine how incoming resources are validated. For real-world deployments, we highly recommend reviewing our [documentation on selecting appropriate policy levels](https://kubernetes.io/docs/tasks/configure-pod-container/migrate-from-psp/#steps).
## Contributing
Please see the [contributing guidelines](../CONTRIBUTING.md) in the parent directory for general information about contributing to this project.

View File

@ -0,0 +1,33 @@
# include the manifests
bases:
- ./manifests
# generate the secret
# this depends on pki files, which can be created (or regenerated) with `make certs`
secretGenerator:
- name: pod-security-webhook
namespace: pod-security-webhook
type: kubernetes.io/tls
options:
disableNameSuffixHash: true
files:
- pki/ca.crt
- pki/tls.crt
- pki/tls.key
# inject the CA into the validating webhook
replacements:
- source:
kind: Secret
name: pod-security-webhook
namespace: pod-security-webhook
fieldPath: data.ca\.crt
targets:
- select:
kind: ValidatingWebhookConfiguration
name: pod-security-webhook.kubernetes.io
fieldPaths:
- webhooks.0.clientConfig.caBundle
- webhooks.1.clientConfig.caBundle
options:
create: true

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: pod-security-webhook

View File

@ -0,0 +1,33 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: pod-security-webhook
namespace: pod-security-webhook
data:
podsecurityconfiguration.yaml: |
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
# Defaults applied when a mode label is not set.
#
# Level label values must be one of:
# - "privileged" (default)
# - "baseline"
# - "restricted"
#
# Version label values must be one of:
# - "latest" (default)
# - specific version like "v1.22"
defaults:
enforce: "privileged"
enforce-version: "latest"
audit: "privileged"
audit-version: "latest"
warn: "privileged"
warn-version: "latest"
exemptions:
# Array of authenticated usernames to exempt.
usernames: []
# Array of runtime class names to exempt.
runtimeClasses: []
# Array of namespaces to exempt.
namespaces: []

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: pod-security-webhook
namespace: pod-security-webhook
spec:
hard:
pods: 3
scopeSelector:
matchExpressions:
- operator: In
scopeName: PriorityClass
values:
- system-cluster-critical

View File

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: pod-security-webhook
namespace: pod-security-webhook

View File

@ -0,0 +1,8 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pod-security-webhook
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: pod-security-webhook
subjects:
- kind: ServiceAccount
name: pod-security-webhook
namespace: pod-security-webhook
roleRef:
kind: ClusterRole
name: pod-security-webhook
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: pod-security-webhook
namespace: pod-security-webhook
labels:
app: pod-security-webhook
spec:
selector:
matchLabels:
app: pod-security-webhook
template:
metadata:
labels:
app: pod-security-webhook
spec:
serviceAccountName: pod-security-webhook
priorityClassName: system-cluster-critical
volumes:
- name: config
configMap:
name: pod-security-webhook
- name: pki
secret:
secretName: pod-security-webhook
containers:
- name: pod-security-webhook
image: k8s.gcr.io/sig-auth/pod-security-webhook:v1.22-alpha.0
terminationMessagePolicy: FallbackToLogsOnError
ports:
- containerPort: 8443
args:
[
"--config",
"/etc/config/podsecurityconfiguration.yaml",
"--tls-cert-file",
"/etc/pki/tls.crt",
"--tls-private-key-file",
"/etc/pki/tls.key",
"--secure-port",
"8443",
]
resources:
requests:
cpu: 100m
limits:
cpu: 500m
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- name: config
mountPath: "/etc/config"
readOnly: true
- name: pki
mountPath: "/etc/pki"
readOnly: true

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: webhook
namespace: pod-security-webhook
labels:
app: pod-security-webhook
spec:
ports:
- port: 443
targetPort: 8443
protocol: TCP
name: https
selector:
app: pod-security-webhook

View File

@ -0,0 +1,74 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: "pod-security-webhook.kubernetes.io"
webhooks:
# Audit annotations will be prefixed with this name
- name: "pod-security-webhook.kubernetes.io"
# Fail-closed admission webhooks can present operational challenges.
# You may want to consider using a failure policy of Ignore, but should
# consider the security tradeoffs.
failurePolicy: Fail
namespaceSelector:
# Exempt the webhook itself to avoid a circular dependency.
matchExpressions:
- key: kubernetes.io/metadata.name
operator: NotIn
values: ["pod-security-webhook"]
rules:
- apiGroups: [""]
apiVersions: ["v1"]
operations: ["CREATE", "UPDATE"]
resources:
- namespaces
- pods
- pods/ephemeralcontainers
clientConfig:
# Populate with the CA for the serving certificate
caBundle: ""
service:
namespace: "pod-security-webhook"
name: "webhook"
admissionReviewVersions: ["v1"]
sideEffects: None
timeoutSeconds: 5
# Audit annotations will be prefixed with this name
- name: "advisory.pod-security-webhook.kubernetes.io"
# Non-enforcing resources can safely fail-open.
failurePolicy: Ignore
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: NotIn
values: ["pod-security-webhook"]
rules:
- apiGroups: [""]
apiVersions: ["v1"]
operations: ["CREATE", "UPDATE"]
resources:
- podtemplates
- replicationcontrollers
- apiGroups: ["apps"]
apiVersions: ["v1"]
operations: ["CREATE", "UPDATE"]
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
- apiGroups: ["batch"]
apiVersions: ["v1"]
operations: ["CREATE", "UPDATE"]
resources:
- cronjobs
- jobs
clientConfig:
# Populate with the CA for the serving certificate
caBundle: ""
service:
namespace: "pod-security-webhook"
name: "webhook"
admissionReviewVersions: ["v1"]
sideEffects: None
timeoutSeconds: 5

View File

@ -0,0 +1,10 @@
resources:
- 10-namespace.yaml
- 20-configmap.yaml
- 20-serviceaccount.yaml
- 20-resourcequota.yaml
- 30-clusterrole.yaml
- 40-clusterrolebinding.yaml
- 50-deployment.yaml
- 60-service.yaml
- 70-validatingwebhookconfiguration.yaml

View File

@ -491,7 +491,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
StorageClassName: &emptyStorageClass,
}
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc))

View File

@ -19,8 +19,11 @@ package framework
import (
"context"
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
@ -295,17 +298,40 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
}
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry. The string below happens to be the quota failure message for GCP.
// Quota failure strings for other platforms can be added here to improve reliability when running
// many parallel test jobs in a single cloud account. Retrying like this mirrors controller behavior and
// is what we would recommend for general clients.
if strings.Contains(lastCreateErr.Error(), `googleapi: Error 403: Quota exceeded for quota group`) {
return false, nil
}
// if it was not a quota failure, fail immediately
return false, lastCreateErr
}
return true, nil
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", lastCreateErr)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
return pv, nil
return resultPV, nil
}
// CreatePV creates the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, timeouts, pv)
}
// CreatePVC creates the PVC resource. Fails test on error.
@ -323,7 +349,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
// make the pvc spec
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
preBindMsg := ""
@ -344,7 +370,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
pv.Spec.ClaimRef.Name = pvc.Name
}
pv, err = createPV(c, pv)
pv, err = createPV(c, timeouts, pv)
if err != nil {
return nil, pvc, err
}
@ -358,7 +384,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
preBindMsg := ""
if preBind {
preBindMsg = " pre-bound"
@ -370,7 +396,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
// instantiate the pv
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return nil, nil, err
}
@ -392,7 +418,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// sees an error returned, it needs to decide what to do about entries in the maps.
// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
// orphaned pvs are not deleted and will remain after the suite completes.
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
pvMap := make(PVMap, numpvs)
pvcMap := make(PVCMap, numpvcs)
extraPVCs := 0
@ -405,7 +431,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create pvs and pvcs
for i := 0; i < pvsToCreate; i++ {
pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false)
if err != nil {
return pvMap, pvcMap, err
}
@ -416,7 +442,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create extra pvs or pvcs as needed
for i := 0; i < extraPVs; i++ {
pv := MakePersistentVolume(pvConfig)
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return pvMap, pvcMap, err
}

View File

@ -29,6 +29,7 @@ const (
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvCreateTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
@ -67,6 +68,9 @@ type TimeoutContext struct {
// PVBound is how long PVs have to become bound.
PVBound time.Duration
// PVCreate is how long PVs have to be created.
PVCreate time.Duration
// PVDelete is how long PVs have to become deleted.
PVDelete time.Duration
@ -95,6 +99,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVCreate: pvCreateTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,

View File

@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
VolumeMode: pvc.Spec.VolumeMode,
})
_, err = e2epv.CreatePV(c, pv)
_, err = e2epv.CreatePV(c, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv %v", err)
ginkgo.By("Waiting for PVC to be in bound phase")

View File

@ -140,7 +140,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
VolumeMode: pvc.Spec.VolumeMode,
})
_, err = e2epv.CreatePV(c, pv)
_, err = e2epv.CreatePV(c, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv %v", err)
ginkgo.By("Waiting for PVC to be in bound phase")

View File

@ -259,7 +259,7 @@ func createPVCPV(
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, f.Timeouts, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)

View File

@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
PVSource: *pvSource1,
Prebind: nil,
}
pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
pv1, pvc1, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig1, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1))
@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
PVSource: *pvSource2,
Prebind: nil,
}
pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
pv2, pvc2, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig2, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2))
@ -293,7 +293,7 @@ func createGCEVolume() (*v1.PersistentVolumeSource, string) {
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
// by the test.
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err := e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
defer func() {
if err != nil {
e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns)

View File

@ -46,7 +46,7 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
// initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
ginkgo.By("Creating the PV and PVC")
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
pv, pvc, err := e2epv.CreatePVPVC(c, t, pvConfig, pvcConfig, ns, isPrebound)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc))

View File

@ -463,7 +463,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
for _, localVolumes := range allLocalVolumes {
for _, localVolume := range localVolumes {
pvConfig := makeLocalPVConfig(config, localVolume)
localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
}
}
@ -505,7 +505,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
pvConfig := makeLocalPVConfig(config, localVolume)
localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
localVolume.pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
}
}
@ -637,7 +637,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}
pvConfig := makeLocalPVConfig(config, localVolume)
var err error
pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig))
pv, err = e2epv.CreatePV(config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
framework.ExpectNoError(err)
})
@ -936,7 +936,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
pvcConfig := makeLocalPVCConfig(config, volume.localVolumeType)
pvConfig := makeLocalPVConfig(config, volume)
volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, false)
volume.pv, volume.pvc, err = e2epv.CreatePVPVC(config.client, config.timeouts, pvConfig, pvcConfig, config.ns, false)
framework.ExpectNoError(err)
}

View File

@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// contains the claim. Verify that the PV and PVC bind correctly, and
// that the pod can write to the nfs volume.
ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() {
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() {
pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PVC and a pre-bound PV: test write access", func() {
pv, pvc, err = e2epv.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
ginkgo.It("create a PV and a pre-bound PVC: test write access", func() {
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
completeTest(f, c, ns, pv, pvc)
})
@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding
ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() {
numPVs, numPVCs := 2, 4
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding
ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() {
numPVs, numPVCs := 3, 3
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
@ -251,7 +251,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Note: PVs are created before claims and no pre-binding.
ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() {
numPVs, numPVCs := 4, 2
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.Context("when invoking the Recycle reclaim policy", func() {
ginkgo.BeforeEach(func() {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
})

View File

@ -19,6 +19,7 @@ package storage
import (
"context"
"fmt"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -110,7 +111,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
className := ""
pvcConfig := e2epv.PersistentVolumeClaimConfig{StorageClassName: &className}
config.pv, config.pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
config.pv, config.pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err)
}

View File

@ -598,7 +598,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ginkgo.It("should create unbound pv count metrics for pvc controller after creating pv only",
func() {
var err error
pv, err = e2epv.CreatePV(c, pv)
pv, err = e2epv.CreatePV(c, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey)
validator([]map[string]int64{nil, {className: 1}, nil, nil})
@ -616,7 +616,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
ginkgo.It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc",
func() {
var err error
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err, "Error creating pv pvc: %v", err)
waitForPVControllerSync(metricsGrabber, boundPVKey, classKey)
waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey)
@ -627,7 +627,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
func() {
var err error
dimensions := []string{pluginNameKey, volumeModeKey}
pv, err = e2epv.CreatePV(c, pv)
pv, err = e2epv.CreatePV(c, f.Timeouts, pv)
framework.ExpectNoError(err, "Error creating pv: %v", err)
waitForPVControllerSync(metricsGrabber, totalPVKey, pluginNameKey)
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()

View File

@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
}
}
ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))