Merge pull request #37837 from gmarek/secrets

Automatic merge from submit-queue

Add secrets to Density and Load tests

cc @jeremyeder @timstclair @sjug
Committed by Kubernetes Submit Queue on 2016-12-08 08:36:03 -08:00 (via GitHub)
commit 907a80c7af
3 changed files with 181 additions and 26 deletions
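
At a glance: both suites gain a `secretsPerPod` knob. Before starting any pods, each test now pre-creates that many secrets per collection through the new `testutils.SecretConfig` helper, passes their names down via `RCConfig.SecretNames` so that every pod mounts them as volumes, and deletes them again during cleanup. Below is a minimal sketch of how the pieces fit together, modeled on `generateConfigsForGroup` in the load test; the helper name, the name scheme, and the import path are illustrative assumptions, not part of the diff:

```go
package demo

import (
	"fmt"

	// Assumed location of the test utilities touched by this PR; adjust to your tree.
	testutils "k8s.io/kubernetes/test/utils"
)

// buildSecretConfigs is a hypothetical helper mirroring what the load test's
// generateConfigsForGroup now does for each collection of pods.
func buildSecretConfigs(groupName, namespace string, secretsPerPod int) ([]*testutils.SecretConfig, []string) {
	secretConfigs := make([]*testutils.SecretConfig, 0, secretsPerPod)
	secretNames := make([]string, 0, secretsPerPod)
	for j := 0; j < secretsPerPod; j++ {
		secretName := fmt.Sprintf("%v-secret-%v", groupName, j) // hypothetical name scheme
		secretConfigs = append(secretConfigs, &testutils.SecretConfig{
			Content:   map[string]string{"foo": "bar"},
			Client:    nil, // filled in later, the way generateConfigs does it
			Name:      secretName,
			Namespace: namespace,
			LogFunc:   func(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) },
		})
		secretNames = append(secretNames, secretName)
	}
	// secretNames feeds RCConfig.SecretNames, which makes the config's
	// create() call attachSecrets(), mounting each secret at /<name> in
	// every pod. The tests call Run() on each config before starting pods
	// (ignoring its error) and Stop() during cleanup.
	return secretConfigs, secretNames
}
```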

File 1 of 3: the Density e2e suite

@@ -64,7 +64,8 @@ type DensityTestConfig struct {
 	PollInterval time.Duration
 	PodCount     int
 	// What kind of resource we want to create
-	kind schema.GroupKind
+	kind          schema.GroupKind
+	SecretConfigs []*testutils.SecretConfig
 }
 
 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@@ -192,6 +193,10 @@ func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels
 func runDensityTest(dtc DensityTestConfig) time.Duration {
 	defer GinkgoRecover()
 
+	// Create all secrets
+	for i := range dtc.SecretConfigs {
+		dtc.SecretConfigs[i].Run()
+	}
+
 	// Start all replication controllers.
 	startTime := time.Now()
 	wg := sync.WaitGroup{}
@@ -257,6 +262,11 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 			framework.ExpectNoError(err)
 		}
 	}
+
+	// Delete all secrets
+	for i := range dtc.SecretConfigs {
+		dtc.SecretConfigs[i].Stop()
+	}
 }
 
 // This test suite can take a long time to run, and can affect or be affected by other tests.
@@ -357,7 +367,8 @@ var _ = framework.KubeDescribe("Density", func() {
 		// Controls how often the apiserver is polled for pods
 		interval time.Duration
 		// What kind of resource we should be creating. Default: ReplicationController
-		kind schema.GroupKind
+		kind          schema.GroupKind
+		secretsPerPod int
 	}
 
 	densityTests := []Density{
@@ -380,7 +391,7 @@ var _ = framework.KubeDescribe("Density", func() {
 			feature = "HighDensityPerformance"
 		}
-		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v", feature, testArg.podsPerNode, testArg.kind)
+		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets", feature, testArg.podsPerNode, testArg.kind, testArg.secretsPerPod)
 		itArg := testArg
 		It(name, func() {
 			nodePreparer := framework.NewE2ETestNodePreparer(
@@ -405,6 +416,7 @@ var _ = framework.KubeDescribe("Density", func() {
 			framework.ExpectNoError(err)
 
 			configs := make([]testutils.RunObjectConfig, numberOfCollections)
+			secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
 			// Since all RCs are created at the same time, timeout for each config
 			// has to assume that it will be run at the very end.
 			podThroughput := 20
@@ -412,8 +424,20 @@ var _ = framework.KubeDescribe("Density", func() {
 			// createClients is defined in load.go
 			clients, internalClients, err := createClients(numberOfCollections)
 			for i := 0; i < numberOfCollections; i++ {
-				name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
 				nsName := namespaces[i].Name
+				secretNames := []string{}
+				for j := 0; j < itArg.secretsPerPod; j++ {
+					secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
+					secretConfigs = append(secretConfigs, &testutils.SecretConfig{
+						Content:   map[string]string{"foo": "bar"},
+						Client:    clients[i],
+						Name:      secretName,
+						Namespace: nsName,
+						LogFunc:   framework.Logf,
+					})
+					secretNames = append(secretNames, secretName)
+				}
+				name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
 				baseConfig := &testutils.RCConfig{
 					Client:               clients[i],
 					InternalClient:       internalClients[i],
@@ -429,6 +453,8 @@ var _ = framework.KubeDescribe("Density", func() {
 					MemRequest:           nodeMemCapacity / 100,
 					MaxContainerFailures: &MaxContainerFailures,
 					Silent:               true,
+					LogFunc:              framework.Logf,
+					SecretNames:          secretNames,
 				}
 				switch itArg.kind {
 				case api.Kind("ReplicationController"):
@@ -449,6 +475,7 @@ var _ = framework.KubeDescribe("Density", func() {
 				PodCount:      totalPods,
 				PollInterval:  DensityPollInterval,
 				kind:          itArg.kind,
+				SecretConfigs: secretConfigs,
 			}
 			e2eStartupTime = runDensityTest(dConfig)
 			if itArg.runLatencyTest {
@@ -694,6 +721,7 @@ var _ = framework.KubeDescribe("Density", func() {
 				Replicas:             podsPerCollection,
 				MaxContainerFailures: &MaxContainerFailures,
 				Silent:               true,
+				LogFunc:              framework.Logf,
 			}
 		}
 		dConfig := DensityTestConfig{
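
One visible effect of the density changes: the generated test names now carry the secret count. An illustrative run of the new format string, with made-up values since the real `densityTests` table is outside the shown hunks:

```go
package main

import "fmt"

func main() {
	// Illustrative values only; the actual table entries are not part of this diff.
	feature, podsPerNode, kind, secretsPerPod := "Performance", 30, "ReplicationController", 2
	fmt.Printf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets\n",
		feature, podsPerNode, kind, secretsPerPod)
	// Output: [Feature:Performance] should allow starting 30 pods per node using ReplicationController with 2 secrets
}
```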

File 2 of 3: the Load capacity e2e suite

@@ -70,6 +70,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 	var nodeCount int
 	var ns string
 	var configs []testutils.RunObjectConfig
+	var secretConfigs []*testutils.SecretConfig
 
 	// Gathers metrics before teardown
 	// TODO add flag that allows to skip cleanup on failure
@@ -121,7 +122,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		image   string
 		command []string
 		// What kind of resource we want to create
-		kind schema.GroupKind
+		kind          schema.GroupKind
+		services      bool
+		secretsPerPod int
 	}
 
 	loadTests := []Load{
@@ -135,8 +138,10 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") {
 			feature = "Performance"
 		}
-		name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v", feature, testArg.podsPerNode, testArg.kind)
+		name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets",
+			feature, testArg.podsPerNode, testArg.kind, testArg.secretsPerPod)
 		itArg := testArg
+		itArg.services = os.Getenv("CREATE_SERVICES") == "true"
 		It(name, func() {
 			// Create a number of namespaces.
@@ -145,11 +150,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 			framework.ExpectNoError(err)
 
 			totalPods := itArg.podsPerNode * nodeCount
-			configs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind)
+			configs, secretConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod)
 			var services []*v1.Service
-			// Read the environment variable to see if we want to create services
-			createServices := os.Getenv("CREATE_SERVICES")
-			if createServices == "true" {
+			if itArg.services {
 				framework.Logf("Creating services")
 				services = generateServicesForConfigs(configs)
 				for _, service := range services {
@@ -160,6 +163,10 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 			} else {
 				framework.Logf("Skipping service creation")
 			}
+			// Create all secrets
+			for i := range secretConfigs {
+				secretConfigs[i].Run()
+			}
 
 			// Simulate lifetime of RC:
 			//  * create with initial size
@@ -200,7 +207,11 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 			deletingTime := time.Duration(totalPods/throughput) * time.Second
 			framework.Logf("Starting to delete ReplicationControllers...")
 			deleteAllResources(configs, deletingTime)
-			if createServices == "true" {
+			// Delete all secrets
+			for i := range secretConfigs {
+				secretConfigs[i].Stop()
+			}
+			if itArg.services {
 				framework.Logf("Starting to delete services...")
 				for _, service := range services {
 					err := clientset.Core().Services(ns).Delete(service.Name, nil)
@@ -270,21 +281,35 @@ func computePodCounts(total int) (int, int, int) {
 	//  - 300 small RCs each 5 pods
 	//  - 25 medium RCs each 30 pods
 	//  - 3 big RCs each 250 pods
-	bigRCCount := total / 4 / bigGroupSize
-	total -= bigRCCount * bigGroupSize
-	mediumRCCount := total / 3 / mediumGroupSize
-	total -= mediumRCCount * mediumGroupSize
-	smallRCCount := total / smallGroupSize
-	return smallRCCount, mediumRCCount, bigRCCount
+	bigGroupCount := total / 4 / bigGroupSize
+	total -= bigGroupCount * bigGroupSize
+	mediumGroupCount := total / 3 / mediumGroupSize
+	total -= mediumGroupCount * mediumGroupSize
+	smallGroupCount := total / smallGroupSize
+	return smallGroupCount, mediumGroupCount, bigGroupCount
 }
 
-func generateConfigs(totalPods int, image string, command []string, nss []*v1.Namespace, kind schema.GroupKind) []testutils.RunObjectConfig {
+func generateConfigs(
+	totalPods int,
+	image string,
+	command []string,
+	nss []*v1.Namespace,
+	kind schema.GroupKind,
+	secretsPerPod int,
+) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
 	configs := make([]testutils.RunObjectConfig, 0)
+	secretConfigs := make([]*testutils.SecretConfig, 0)
 
-	smallRCCount, mediumRCCount, bigRCCount := computePodCounts(totalPods)
-	configs = append(configs, generateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallRCCount, image, command, kind)...)
-	configs = append(configs, generateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumRCCount, image, command, kind)...)
-	configs = append(configs, generateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigRCCount, image, command, kind)...)
+	smallGroupCount, mediumGroupCount, bigGroupCount := computePodCounts(totalPods)
+	newConfigs, newSecretConfigs := generateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod)
+	configs = append(configs, newConfigs...)
+	secretConfigs = append(secretConfigs, newSecretConfigs...)
+	newConfigs, newSecretConfigs = generateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod)
+	configs = append(configs, newConfigs...)
+	secretConfigs = append(secretConfigs, newSecretConfigs...)
+	newConfigs, newSecretConfigs = generateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod)
+	configs = append(configs, newConfigs...)
+	secretConfigs = append(secretConfigs, newSecretConfigs...)
 
 	// Create a number of clients to better simulate real usecase
 	// where not everyone is using exactly the same client.
@@ -296,26 +321,54 @@ func generateConfigs(totalPods int, image string, command []string, nss []*v1.Na
 		configs[i].SetClient(clients[i%len(clients)])
 		configs[i].SetInternalClient(internalClients[i%len(internalClients)])
 	}
+	for i := 0; i < len(secretConfigs); i++ {
+		secretConfigs[i].Client = clients[i%len(clients)]
+	}
 
-	return configs
+	return configs, secretConfigs
 }
 
 func generateConfigsForGroup(
-	nss []*v1.Namespace, groupName string, size, count int, image string, command []string, kind schema.GroupKind) []testutils.RunObjectConfig {
+	nss []*v1.Namespace,
+	groupName string,
+	size, count int,
+	image string,
+	command []string,
+	kind schema.GroupKind,
+	secretsPerPod int,
+) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
 	configs := make([]testutils.RunObjectConfig, 0, count)
+	secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
 	for i := 1; i <= count; i++ {
+		namespace := nss[i%len(nss)].Name
+		secretNames := make([]string, 0, secretsPerPod)
+
+		for j := 0; j < secretsPerPod; j++ {
+			secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
+			secretConfigs = append(secretConfigs, &testutils.SecretConfig{
+				Content:   map[string]string{"foo": "bar"},
+				Client:    nil, // this will be overwritten later
+				Name:      secretName,
+				Namespace: namespace,
+				LogFunc:   framework.Logf,
+			})
+			secretNames = append(secretNames, secretName)
+		}
+
 		baseConfig := &testutils.RCConfig{
 			Client:         nil, // this will be overwritten later
 			InternalClient: nil, // this will be overwritten later
 			Name:           groupName + "-" + strconv.Itoa(i),
-			Namespace:      nss[i%len(nss)].Name,
+			Namespace:      namespace,
 			Timeout:        10 * time.Minute,
 			Image:          image,
 			Command:        command,
 			Replicas:       size,
 			CpuRequest:     10,       // 0.01 core
 			MemRequest:     26214400, // 25MB
+			SecretNames:    secretNames,
 		}
 		var config testutils.RunObjectConfig
 		switch kind {
 		case api.Kind("ReplicationController"):
@@ -329,7 +382,7 @@ func generateConfigsForGroup(
 		}
 		configs = append(configs, config)
 	}
-	return configs
+	return configs, secretConfigs
 }
 
 func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service {
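
The rename from `RCCount` to `GroupCount` leaves the arithmetic untouched, and the 3000-pod example in the comment still checks out. A standalone verification, with the group sizes assumed to be the values the comment implies (5, 30, 250):

```go
package main

import "fmt"

func main() {
	// Group sizes implied by the comment; the real constants live outside the shown hunks.
	const smallGroupSize, mediumGroupSize, bigGroupSize = 5, 30, 250
	total := 3000
	bigGroupCount := total / 4 / bigGroupSize       // 3000/4/250 = 3
	total -= bigGroupCount * bigGroupSize           // 3000 - 750 = 2250
	mediumGroupCount := total / 3 / mediumGroupSize // 2250/3/30 = 25
	total -= mediumGroupCount * mediumGroupSize     // 2250 - 750 = 1500
	smallGroupCount := total / smallGroupSize       // 1500/5 = 300
	fmt.Println(smallGroupCount, mediumGroupCount, bigGroupCount) // 300 25 3
}
```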

File 3 of 3: the shared test utilities (RCConfig and the new SecretConfig)

@@ -111,6 +111,9 @@ type RCConfig struct {
 	// kubelets are running those variables should be nil.
 	NodeDumpFunc      func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{}))
 	ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{}))
+
+	// Names of the secrets to mount
+	SecretNames []string
 }
 
 func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -245,6 +248,10 @@ func (config *DeploymentConfig) create() error {
 		},
 	}
 
+	if len(config.SecretNames) > 0 {
+		attachSecrets(&deployment.Spec.Template, config.SecretNames)
+	}
+
 	config.applyTo(&deployment.Spec.Template)
 
 	_, err := config.Client.Extensions().Deployments(config.Namespace).Create(deployment)
@@ -305,6 +312,10 @@ func (config *ReplicaSetConfig) create() error {
 		},
 	}
 
+	if len(config.SecretNames) > 0 {
+		attachSecrets(&rs.Spec.Template, config.SecretNames)
+	}
+
 	config.applyTo(&rs.Spec.Template)
 
 	_, err := config.Client.Extensions().ReplicaSets(config.Namespace).Create(rs)
@@ -398,6 +409,10 @@ func (config *RCConfig) create() error {
 		},
 	}
 
+	if len(config.SecretNames) > 0 {
+		attachSecrets(rc.Spec.Template, config.SecretNames)
+	}
+
 	config.applyTo(rc.Spec.Template)
 
 	_, err := config.Client.Core().ReplicationControllers(config.Namespace).Create(rc)
@@ -926,3 +941,62 @@ func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCrea
 		return createPod(client, namespace, podCount, basePod)
 	}
 }
+
+type SecretConfig struct {
+	Content   map[string]string
+	Client    clientset.Interface
+	Name      string
+	Namespace string
+	// If set this function will be used to print log lines instead of glog.
+	LogFunc func(fmt string, args ...interface{})
+}
+
+func (config *SecretConfig) Run() error {
+	secret := &v1.Secret{
+		ObjectMeta: v1.ObjectMeta{
+			Name: config.Name,
+		},
+		StringData: map[string]string{},
+	}
+	for k, v := range config.Content {
+		secret.StringData[k] = v
+	}
+
+	_, err := config.Client.Core().Secrets(config.Namespace).Create(secret)
+	if err != nil {
+		return fmt.Errorf("Error creating secret: %v", err)
+	}
+	config.LogFunc("Created secret %v/%v", config.Namespace, config.Name)
+	return nil
+}
+
+func (config *SecretConfig) Stop() error {
+	if err := config.Client.Core().Secrets(config.Namespace).Delete(config.Name, &v1.DeleteOptions{}); err != nil {
+		return fmt.Errorf("Error deleting secret: %v", err)
+	}
+	config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
+	return nil
+}
+
+// TODO: attach secrets using different possibilities: env vars, image pull secrets.
+func attachSecrets(template *v1.PodTemplateSpec, secretNames []string) {
+	volumes := make([]v1.Volume, 0, len(secretNames))
+	mounts := make([]v1.VolumeMount, 0, len(secretNames))
+	for _, name := range secretNames {
+		volumes = append(volumes, v1.Volume{
+			Name: name,
+			VolumeSource: v1.VolumeSource{
+				Secret: &v1.SecretVolumeSource{
+					SecretName: name,
+				},
+			},
+		})
+		mounts = append(mounts, v1.VolumeMount{
+			Name:      name,
+			MountPath: fmt.Sprintf("/%v", name),
+		})
+	}
+
+	template.Spec.Volumes = volumes
+	template.Spec.Containers[0].VolumeMounts = mounts
+}