Merge pull request #70114 from mikedanese/svcacctscale
scale test service account token projection in kubemark
commit 95613765e4
@@ -34,6 +34,7 @@ go_library(
         "//pkg/util/oom:go_default_library",
         "//pkg/util/sysctl:go_default_library",
         "//pkg/volume/emptydir:go_default_library",
+        "//pkg/volume/projected:go_default_library",
         "//pkg/volume/secret:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/volume/emptydir"
+	"k8s.io/kubernetes/pkg/volume/projected"
 	"k8s.io/kubernetes/pkg/volume/secret"
 	"k8s.io/kubernetes/test/utils"
@@ -64,6 +65,7 @@ func NewHollowKubelet(
 	// -----------------
 	volumePlugins := emptydir.ProbeVolumePlugins()
 	volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...)
+	volumePlugins = append(volumePlugins, projected.ProbeVolumePlugins()...)
 	d := &kubelet.Dependencies{
 		KubeClient:      client,
 		HeartbeatClient: client,
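
Note (sketch, not part of the diff): the hollow kubelet builds its volume-plugin list purely by concatenating each volume package's ProbeVolumePlugins() result, so the projected plugin must be appended here or hollow nodes cannot mount service account token projections. A minimal standalone sketch of the same composition, assuming the standard volume.VolumePlugin interface:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/emptydir"
	"k8s.io/kubernetes/pkg/volume/projected"
	"k8s.io/kubernetes/pkg/volume/secret"
)

func main() {
	// Same composition as NewHollowKubelet above: each volume package
	// exposes ProbeVolumePlugins() []volume.VolumePlugin.
	plugins := emptydir.ProbeVolumePlugins()
	plugins = append(plugins, secret.ProbeVolumePlugins()...)
	plugins = append(plugins, projected.ProbeVolumePlugins()...)

	// Listing the registered plugin names should now include the
	// projected plugin alongside the empty-dir and secret plugins.
	for _, p := range plugins {
		fmt.Println(p.GetPluginName())
	}
}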
@@ -484,6 +484,18 @@ var _ = SIGDescribe("Density", func() {
 		ns = f.Namespace.Name
 		testPhaseDurations = timer.NewTestPhaseTimer()
 
+		// This is used to mimic what new service account token volumes will
+		// eventually look like. We can remove this once the controller manager
+		// publishes the root CA certificate to each namespace.
+		c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "kube-root-ca-crt",
+			},
+			Data: map[string]string{
+				"ca.crt": "trust me, i'm a ca.crt",
+			},
+		})
+
 		_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
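
Note (sketch, not part of the PR): the stub ConfigMap above is what the projected volumes added below consume as their ca.crt source. A hypothetical helper, verifyStubRootCA, to read it back; it assumes the test's clientset c and namespace ns, plus the clientset, metav1, and framework imports this file already uses:

func verifyStubRootCA(c clientset.Interface, ns string) {
	// Get takes no context argument in the client-go version this PR targets.
	cm, err := c.CoreV1().ConfigMaps(ns).Get("kube-root-ca-crt", metav1.GetOptions{})
	if err != nil {
		framework.Failf("getting stub root CA ConfigMap: %v", err)
	}
	framework.Logf("ca.crt = %q", cm.Data["ca.crt"]) // "trust me, i'm a ca.crt"
}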
@@ -533,11 +545,12 @@ var _ = SIGDescribe("Density", func() {
 		// Controls how often the apiserver is polled for pods
 		interval time.Duration
 		// What kind of resource we should be creating. Default: ReplicationController
-		kind             schema.GroupKind
-		secretsPerPod    int
-		configMapsPerPod int
-		daemonsPerNode   int
-		quotas           bool
+		kind                          schema.GroupKind
+		secretsPerPod                 int
+		configMapsPerPod              int
+		svcacctTokenProjectionsPerPod int
+		daemonsPerNode                int
+		quotas                        bool
 	}
 
 	densityTests := []Density{
@@ -556,6 +569,8 @@ var _ = SIGDescribe("Density", func() {
 		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
 		// Test with configmaps
 		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
+		// Test with service account projected volumes
+		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), svcacctTokenProjectionsPerPod: 2},
 		// Test with quotas
 		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), quotas: true},
 	}
@@ -575,12 +590,13 @@ var _ = SIGDescribe("Density", func() {
 			feature = "HighDensityPerformance"
 		}
 
-		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
+		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps, %v token projections, and %v daemons",
			feature,
			testArg.podsPerNode,
			testArg.kind,
			testArg.secretsPerPod,
			testArg.configMapsPerPod,
+			testArg.svcacctTokenProjectionsPerPod,
			testArg.daemonsPerNode,
 		)
 		if testArg.quotas {
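
Note: with the added format verb, the new density entry above generates a test name along the lines of "... should allow starting 30 pods per node using <kind> with 0 secrets, 0 configmaps, 2 token projections, and 0 daemons".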
@@ -653,24 +669,25 @@ var _ = SIGDescribe("Density", func() {
 			}
 			name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
 			baseConfig := &testutils.RCConfig{
-				Client:               clients[i],
-				InternalClient:       internalClients[i],
-				ScalesGetter:         scalesClients[i],
-				Image:                imageutils.GetPauseImageName(),
-				Name:                 name,
-				Namespace:            nsName,
-				Labels:               map[string]string{"type": "densityPod"},
-				PollInterval:         DensityPollInterval,
-				Timeout:              timeout,
-				PodStatusFile:        fileHndl,
-				Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
-				CpuRequest:           nodeCpuCapacity / 100,
-				MemRequest:           nodeMemCapacity / 100,
-				MaxContainerFailures: &MaxContainerFailures,
-				Silent:               true,
-				LogFunc:              framework.Logf,
-				SecretNames:          secretNames,
-				ConfigMapNames:       configMapNames,
+				Client:                         clients[i],
+				InternalClient:                 internalClients[i],
+				ScalesGetter:                   scalesClients[i],
+				Image:                          imageutils.GetPauseImageName(),
+				Name:                           name,
+				Namespace:                      nsName,
+				Labels:                         map[string]string{"type": "densityPod"},
+				PollInterval:                   DensityPollInterval,
+				Timeout:                        timeout,
+				PodStatusFile:                  fileHndl,
+				Replicas:                       (totalPods + numberOfCollections - 1) / numberOfCollections,
+				CpuRequest:                     nodeCpuCapacity / 100,
+				MemRequest:                     nodeMemCapacity / 100,
+				MaxContainerFailures:           &MaxContainerFailures,
+				Silent:                         true,
+				LogFunc:                        framework.Logf,
+				SecretNames:                    secretNames,
+				ConfigMapNames:                 configMapNames,
+				ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod,
 			}
 			switch itArg.kind {
 			case api.Kind("ReplicationController"):
@@ -172,6 +172,8 @@ type RCConfig struct {
 	// Names of the secrets and configmaps to mount.
 	SecretNames    []string
 	ConfigMapNames []string
+
+	ServiceAccountTokenProjections int
 }
 
 func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -322,6 +324,10 @@ func (config *DeploymentConfig) create() error {
 		attachConfigMaps(&deployment.Spec.Template, config.ConfigMapNames)
 	}
 
+	for i := 0; i < config.ServiceAccountTokenProjections; i++ {
+		attachServiceAccountTokenProjection(&deployment.Spec.Template, fmt.Sprintf("tok-%d", i))
+	}
+
 	config.applyTo(&deployment.Spec.Template)
 
 	if err := CreateDeploymentWithRetries(config.Client, config.Namespace, deployment); err != nil {
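
Note: with ServiceAccountTokenProjections: 2, the loop above attaches projected volumes named tok-0 and tok-1 to the pod template; attachServiceAccountTokenProjection (added below) mounts each at /var/service-account-tokens/<name> and reuses the volume name as the token audience.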
@@ -1241,6 +1247,57 @@ func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) {
 	template.Spec.Containers[0].VolumeMounts = mounts
 }
 
+func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name string) {
+	template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
+		v1.VolumeMount{
+			Name:      name,
+			MountPath: "/var/service-account-tokens/" + name,
+		})
+
+	template.Spec.Volumes = append(template.Spec.Volumes,
+		v1.Volume{
+			Name: name,
+			VolumeSource: v1.VolumeSource{
+				Projected: &v1.ProjectedVolumeSource{
+					Sources: []v1.VolumeProjection{
+						{
+							ServiceAccountToken: &v1.ServiceAccountTokenProjection{
+								Path:     "token",
+								Audience: name,
+							},
+						},
+						{
+							ConfigMap: &v1.ConfigMapProjection{
+								LocalObjectReference: v1.LocalObjectReference{
+									Name: "kube-root-ca-crt",
+								},
+								Items: []v1.KeyToPath{
+									{
+										Key:  "ca.crt",
+										Path: "ca.crt",
+									},
+								},
+							},
+						},
+						{
+							DownwardAPI: &v1.DownwardAPIProjection{
+								Items: []v1.DownwardAPIVolumeFile{
+									{
+										Path: "namespace",
+										FieldRef: &v1.ObjectFieldSelector{
+											APIVersion: "v1",
+											FieldPath:  "metadata.namespace",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		})
+}
+
 type DaemonConfig struct {
 	Client clientset.Interface
 	Name   string
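
Note (sketch, not part of the PR): a minimal usage example of the new helper, mirroring the tok-%d loop in DeploymentConfig.create above. attachServiceAccountTokenProjection is unexported in test/utils, so this assumes it is in scope:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A bare pod template with a single container, as the runner builds.
	template := &v1.PodTemplateSpec{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "pause"}},
		},
	}

	// Equivalent of DeploymentConfig.create with ServiceAccountTokenProjections = 2.
	for i := 0; i < 2; i++ {
		attachServiceAccountTokenProjection(template, fmt.Sprintf("tok-%d", i))
	}

	// Expect volumes tok-0 and tok-1, each mounted under
	// /var/service-account-tokens/ and projecting token, ca.crt, and namespace.
	for i, m := range template.Spec.Containers[0].VolumeMounts {
		fmt.Println(template.Spec.Volumes[i].Name, "->", m.MountPath)
	}
}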