Merge pull request #87077 from soltysh/remove_run_generators

Remove kubectl run generators
Authored by Kubernetes Prow Robot on 2020-01-29 12:16:14 -08:00; committed by GitHub
commit dba8d60f8c
17 changed files with 88 additions and 2150 deletions
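In effect, this commit strips kubectl run down to a single code path, the run-pod/v1 generator. A rough sketch of the user-facing change, with illustrative output lines (exact wording varies by release):

    # Before this change, the default generator produced a Deployment:
    kubectl run nginx --image=nginx
    # deployment.apps/nginx created

    # After this change, kubectl run always creates a single pod:
    kubectl run nginx --image=nginx
    # pod/nginx created

    # Workload controllers come from dedicated create subcommands instead:
    kubectl create deployment nginx --image=nginx
    kubectl create job pi --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)'
    kubectl create cronjob pi --schedule="*/5 * * * *" --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)'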

View File

@@ -59,6 +59,7 @@ go_test(
"//staging/src/k8s.io/kubectl/pkg/cmd/delete:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/cmd/testing:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/generate/versioned:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/i18n:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",

View File

@@ -58,47 +58,35 @@ import (
)
var (
runLong = templates.LongDesc(i18n.T(`
Create and run a particular image, possibly replicated.
Creates a deployment or job to manage the created container(s).`))
runLong = templates.LongDesc(i18n.T(`Create and run a particular image in a pod.`))
runExample = templates.Examples(i18n.T(`
# Start a single instance of nginx.
# Start a nginx pod.
kubectl run nginx --image=nginx
# Start a single instance of hazelcast and let the container expose port 5701 .
# Start a hazelcast pod and let the container expose port 5701.
kubectl run hazelcast --image=hazelcast/hazelcast --port=5701
# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container.
# Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container.
kubectl run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default"
# Start a single instance of hazelcast and set labels "app=hazelcast" and "env=prod" in the container.
# Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container.
kubectl run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod"
# Start a replicated instance of nginx.
kubectl run nginx --image=nginx --replicas=5
# Dry run. Print the corresponding API objects without creating them.
kubectl run nginx --image=nginx --dry-run
# Start a single instance of nginx, but overload the spec of the deployment with a partial set of values parsed from JSON.
# Start a nginx pod, but overload the spec with a partial set of values parsed from JSON.
kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
# Start a pod of busybox and keep it in the foreground, don't restart it if it exits.
# Start a busybox pod and keep it in the foreground, don't restart it if it exits.
kubectl run -i -t busybox --image=busybox --restart=Never
# Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command.
# Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command.
kubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>
# Start the nginx container using a different command and custom arguments.
kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
# Start the perl container to compute π to 2000 places and print it out.
kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
# Start the cron job to compute π to 2000 places and print it out every 5 minutes.
kubectl run pi --schedule="0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'`))
# Start the nginx pod using a different command and custom arguments.
kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>`))
)
const (
@@ -147,6 +135,8 @@ func NewRunOptions(streams genericclioptions.IOStreams) *RunOptions {
DeleteFlags: delete.NewDeleteFlags("to use to replace the resource."),
RecordFlags: genericclioptions.NewRecordFlags(),
Generator: generateversioned.RunPodV1GeneratorName,
Recorder: genericclioptions.NoopRecorder{},
IOStreams: streams,
@@ -157,7 +147,7 @@ func NewCmdRun(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co
o := NewRunOptions(streams)
cmd := &cobra.Command{
Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]",
Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]",
DisableFlagsInUseLine: true,
Short: i18n.T("Run a particular image on the cluster"),
Long: runLong,
@@ -181,15 +171,17 @@ func NewCmdRun(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co
func addRunFlags(cmd *cobra.Command, opt *RunOptions) {
cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVar(&opt.Generator, "generator", opt.Generator, i18n.T("The name of the API generator to use, see http://kubernetes.io/docs/user-guide/kubectl-conventions/#generators for a list."))
cmd.Flags().MarkDeprecated("generator", "has no effect and will be removed in the future.")
cmd.Flags().StringVar(&opt.Image, "image", opt.Image, i18n.T("The image for the container to run."))
cmd.MarkFlagRequired("image")
cmd.Flags().String("image-pull-policy", "", i18n.T("The image pull policy for the container. If left empty, this value will not be specified by the client and defaulted by the server"))
cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default is 1.")
cmd.Flags().MarkDeprecated("replicas", "has no effect and will be removed in the future.")
cmd.Flags().Bool("rm", false, "If true, delete resources created in this command for attached containers.")
cmd.Flags().String("overrides", "", i18n.T("An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field."))
cmd.Flags().StringArray("env", []string{}, "Environment variables to set in the container")
cmd.Flags().String("serviceaccount", "", "Service account to set in the pod spec")
cmd.Flags().StringVar(&opt.Port, "port", opt.Port, i18n.T("The port that this container exposes. If --expose is true, this is also the port used by the service that is created."))
cmd.Flags().StringArray("env", []string{}, "Environment variables to set in the container.")
cmd.Flags().String("serviceaccount", "", "Service account to set in the pod spec.")
cmd.Flags().StringVar(&opt.Port, "port", opt.Port, i18n.T("The port that this container exposes."))
cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.")
cmd.Flags().StringP("labels", "l", "", "Comma separated labels to apply to the pod(s). Will override previous values.")
cmd.Flags().BoolVarP(&opt.Interactive, "stdin", "i", opt.Interactive, "Keep stdin open on the container(s) in the pod, even if nothing is attached.")
@@ -200,11 +192,14 @@ func addRunFlags(cmd *cobra.Command, opt *RunOptions) {
cmd.Flags().Bool("command", false, "If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default.")
cmd.Flags().String("requests", "", i18n.T("The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges."))
cmd.Flags().String("limits", "", i18n.T("The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'. Note that server side components may assign limits depending on the server configuration, such as limit ranges."))
cmd.Flags().BoolVar(&opt.Expose, "expose", opt.Expose, "If true, a public, external service is created for the container(s) which are run")
cmd.Flags().BoolVar(&opt.Expose, "expose", opt.Expose, "If true, service is created for the container(s) which are run")
cmd.Flags().String("service-generator", "service/v2", i18n.T("The name of the generator to use for creating a service. Only used if --expose is true"))
cmd.Flags().MarkDeprecated("service-generator", "and will be removed in the future.")
cmd.Flags().String("service-overrides", "", i18n.T("An inline JSON override for the generated service object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. Only used if --expose is true."))
cmd.Flags().MarkDeprecated("service-overrides", "and will be removed in the future.")
cmd.Flags().BoolVar(&opt.Quiet, "quiet", opt.Quiet, "If true, suppress prompt messages.")
cmd.Flags().StringVar(&opt.Schedule, "schedule", opt.Schedule, i18n.T("A schedule in the Cron format the job should be run with."))
cmd.Flags().MarkDeprecated("schedule", "has no effect and will be removed in the future.")
}
func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error {
@@ -308,48 +303,10 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e
return err
}
clientset, err := f.KubernetesClientSet()
if err != nil {
return err
}
generatorName := o.Generator
if len(o.Schedule) != 0 && len(generatorName) == 0 {
generatorName = generateversioned.CronJobV1Beta1GeneratorName
}
if len(generatorName) == 0 {
switch restartPolicy {
case corev1.RestartPolicyAlways:
generatorName = generateversioned.DeploymentAppsV1GeneratorName
case corev1.RestartPolicyOnFailure:
generatorName = generateversioned.JobV1GeneratorName
case corev1.RestartPolicyNever:
generatorName = generateversioned.RunPodV1GeneratorName
}
// Falling back because the generator was not provided and the default one could be unavailable.
generatorNameTemp, err := generateversioned.FallbackGeneratorNameIfNecessary(generatorName, clientset.Discovery(), o.ErrOut)
if err != nil {
return err
}
if generatorNameTemp != generatorName {
cmdutil.Warning(o.ErrOut, generatorName, generatorNameTemp)
} else {
generatorName = generatorNameTemp
}
}
generators := generateversioned.GeneratorFn("run")
generator, found := generators[generatorName]
generator, found := generators[generateversioned.RunPodV1GeneratorName]
if !found {
return cmdutil.UsageErrorf(cmd, "generator %q not found", generatorName)
}
// start deprecating all generators except for 'run-pod/v1' which will be
// the only supported on a route to simple kubectl run which should mimic
// docker run
if generatorName != generateversioned.RunPodV1GeneratorName {
fmt.Fprintf(o.ErrOut, "kubectl run --generator=%s is DEPRECATED and will be removed in a future version. Use kubectl run --generator=%s or kubectl create instead.\n", generatorName, generateversioned.RunPodV1GeneratorName)
return cmdutil.UsageErrorf(cmd, "generator %q not found", o.Generator)
}
names := generator.ParamNames()
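Net effect of the run.go hunks above: --generator, --replicas, and --schedule become deprecated no-ops, --service-generator and --service-overrides are deprecated but still honored when --expose is set, and the generator lookup is hardwired to run-pod/v1. A hedged sketch of the resulting behavior; the warning lines follow cobra's usual MarkDeprecated format:

    # Old invocations keep working, but warn and create a pod:
    kubectl run pi --generator=job/v1 --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)'
    # Flag --generator has been deprecated, has no effect and will be removed in the future.
    # pod/pi created

    kubectl run nginx --image=nginx --replicas=5
    # Flag --replicas has been deprecated, has no effect and will be removed in the future.
    # pod/nginx created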

View File

@@ -39,6 +39,7 @@ import (
"k8s.io/kubectl/pkg/cmd/delete"
cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
generateversioned "k8s.io/kubectl/pkg/generate/versioned"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util/i18n"
)
@@ -174,7 +175,7 @@ func TestRunArgsFollowDashRules(t *testing.T) {
GroupVersion: corev1.SchemeGroupVersion,
NegotiatedSerializer: ns,
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
if req.URL.Path == "/namespaces/test/replicationcontrollers" {
if req.URL.Path == "/namespaces/test/pods" {
return &http.Response{StatusCode: http.StatusCreated, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, rc)}, nil
}
return &http.Response{
@@ -188,7 +189,6 @@ func TestRunArgsFollowDashRules(t *testing.T) {
cmd := NewCmdRun(tf, genericclioptions.NewTestIOStreamsDiscard())
cmd.Flags().Set("image", "nginx")
cmd.Flags().Set("generator", "run/v1")
printFlags := genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme)
printer, err := printFlags.ToPrinter()
@@ -205,7 +205,7 @@ func TestRunArgsFollowDashRules(t *testing.T) {
IOStreams: genericclioptions.NewTestIOStreamsDiscard(),
Image: "nginx",
Generator: "run/v1",
Generator: generateversioned.RunPodV1GeneratorName,
PrintObj: func(obj runtime.Object) error {
return printer.PrintObj(obj, os.Stdout)

View File

@@ -29,9 +29,6 @@ go_library(
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/batch/v2alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@@ -77,18 +74,12 @@ go_test(
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/batch/v2alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",

View File

@@ -22,10 +22,6 @@ import (
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
batchv2alpha1 "k8s.io/api/batch/v2alpha1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
@@ -39,7 +35,6 @@ const (
// TODO(sig-cli): Enforce consistent naming for generators here.
// See discussion in https://github.com/kubernetes/kubernetes/issues/46237
// before you add any more.
RunV1GeneratorName = "run/v1"
RunPodV1GeneratorName = "run-pod/v1"
ServiceV1GeneratorName = "service/v1"
ServiceV2GeneratorName = "service/v2"
@@ -49,15 +44,9 @@
ServiceExternalNameGeneratorV1Name = "service-externalname/v1"
ServiceAccountV1GeneratorName = "serviceaccount/v1"
HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1"
DeploymentV1Beta1GeneratorName = "deployment/v1beta1"
DeploymentAppsV1Beta1GeneratorName = "deployment/apps.v1beta1"
DeploymentAppsV1GeneratorName = "deployment/apps.v1"
DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1"
DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1"
DeploymentBasicAppsV1GeneratorName = "deployment-basic/apps.v1"
JobV1GeneratorName = "job/v1"
CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1"
CronJobV1Beta1GeneratorName = "cronjob/v1beta1"
NamespaceV1GeneratorName = "namespace/v1"
ResourceQuotaV1GeneratorName = "resourcequotas/v1"
SecretV1GeneratorName = "secret/v1"
@@ -104,14 +93,7 @@ func DefaultGenerators(cmdName string) map[string]generate.Generator {
generator = map[string]generate.Generator{}
case "run":
generator = map[string]generate.Generator{
RunV1GeneratorName: BasicReplicationController{},
RunPodV1GeneratorName: BasicPod{},
DeploymentV1Beta1GeneratorName: DeploymentV1Beta1{},
DeploymentAppsV1Beta1GeneratorName: DeploymentAppsV1Beta1{},
DeploymentAppsV1GeneratorName: DeploymentAppsV1{},
JobV1GeneratorName: JobV1{},
CronJobV2Alpha1GeneratorName: CronJobV2Alpha1{},
CronJobV1Beta1GeneratorName: CronJobV1Beta1{},
RunPodV1GeneratorName: BasicPod{},
}
case "namespace":
generator = map[string]generate.Generator{
@@ -150,30 +132,6 @@ func FallbackGeneratorNameIfNecessary(
cmdErr io.Writer,
) (string, error) {
switch generatorName {
case DeploymentAppsV1GeneratorName:
hasResource, err := HasResource(discoveryClient, appsv1.SchemeGroupVersion.WithResource("deployments"))
if err != nil {
return "", err
}
if !hasResource {
return FallbackGeneratorNameIfNecessary(DeploymentAppsV1Beta1GeneratorName, discoveryClient, cmdErr)
}
case DeploymentAppsV1Beta1GeneratorName:
hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments"))
if err != nil {
return "", err
}
if !hasResource {
return FallbackGeneratorNameIfNecessary(DeploymentV1Beta1GeneratorName, discoveryClient, cmdErr)
}
case DeploymentV1Beta1GeneratorName:
hasResource, err := HasResource(discoveryClient, extensionsv1beta1.SchemeGroupVersion.WithResource("deployments"))
if err != nil {
return "", err
}
if !hasResource {
return RunV1GeneratorName, nil
}
case DeploymentBasicAppsV1GeneratorName:
hasResource, err := HasResource(discoveryClient, appsv1.SchemeGroupVersion.WithResource("deployments"))
if err != nil {
@@ -190,30 +148,6 @@ func FallbackGeneratorNameIfNecessary(
if !hasResource {
return DeploymentBasicV1Beta1GeneratorName, nil
}
case JobV1GeneratorName:
hasResource, err := HasResource(discoveryClient, batchv1.SchemeGroupVersion.WithResource("jobs"))
if err != nil {
return "", err
}
if !hasResource {
return RunPodV1GeneratorName, nil
}
case CronJobV1Beta1GeneratorName:
hasResource, err := HasResource(discoveryClient, batchv1beta1.SchemeGroupVersion.WithResource("cronjobs"))
if err != nil {
return "", err
}
if !hasResource {
return FallbackGeneratorNameIfNecessary(CronJobV2Alpha1GeneratorName, discoveryClient, cmdErr)
}
case CronJobV2Alpha1GeneratorName:
hasResource, err := HasResource(discoveryClient, batchv2alpha1.SchemeGroupVersion.WithResource("cronjobs"))
if err != nil {
return "", err
}
if !hasResource {
return JobV1GeneratorName, nil
}
}
return generatorName, nil
}
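After this hunk, DefaultGenerators("run") maps only run-pod/v1 to BasicPod, and the fallback switch retains just the deployment-basic cases, which back kubectl create deployment on clusters that lack apps/v1. Roughly:

    # create deployment still probes discovery and degrades to older
    # deployment API groups on legacy clusters; run no longer does any of this:
    kubectl create deployment nginx --image=nginx
    # deployment.apps/nginx created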

View File

@@ -21,13 +21,7 @@ import (
"strconv"
"strings"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
batchv2alpha1 "k8s.io/api/batch/v2alpha1"
"k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -35,270 +29,6 @@ import (
"k8s.io/kubectl/pkg/generate"
)
type DeploymentV1Beta1 struct{}
func (DeploymentV1Beta1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "replicas", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "serviceaccount", Required: false},
}
}
func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
count, err := strconv.Atoi(params["replicas"])
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
count32 := int32(count)
deployment := extensionsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: &count32,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}
return &deployment, nil
}
type DeploymentAppsV1Beta1 struct{}
func (DeploymentAppsV1Beta1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "replicas", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "serviceaccount", Required: false},
}
}
func (DeploymentAppsV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
count, err := strconv.Atoi(params["replicas"])
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
count32 := int32(count)
deployment := appsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: appsv1beta1.DeploymentSpec{
Replicas: &count32,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}
return &deployment, nil
}
type DeploymentAppsV1 struct{}
func (DeploymentAppsV1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "replicas", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "serviceaccount", Required: false},
}
}
func (DeploymentAppsV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
count, err := strconv.Atoi(params["replicas"])
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
count32 := int32(count)
deployment := appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: appsv1.DeploymentSpec{
Replicas: &count32,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}
return &deployment, nil
}
// getLabels returns map of labels.
func getLabels(params map[string]string, name string) (map[string]string, error) {
labelString, found := params["labels"]
@@ -376,325 +106,6 @@ func getEnvs(genericParams map[string]interface{}) ([]v1.EnvVar, error) {
return envs, nil
}
type JobV1 struct{}
func (JobV1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "leave-stdin-open", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "restart", Required: false},
{Name: "serviceaccount", Required: false},
}
}
func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false)
if err != nil {
return nil, err
}
podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
restartPolicy := v1.RestartPolicy(params["restart"])
if len(restartPolicy) == 0 {
restartPolicy = v1.RestartPolicyNever
}
podSpec.RestartPolicy = restartPolicy
job := batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}
return &job, nil
}
type CronJobV2Alpha1 struct{}
func (CronJobV2Alpha1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "leave-stdin-open", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "restart", Required: false},
{Name: "schedule", Required: true},
{Name: "serviceaccount", Required: false},
}
}
func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false)
if err != nil {
return nil, err
}
podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
restartPolicy := v1.RestartPolicy(params["restart"])
if len(restartPolicy) == 0 {
restartPolicy = v1.RestartPolicyNever
}
podSpec.RestartPolicy = restartPolicy
cronJob := batchv2alpha1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: batchv2alpha1.CronJobSpec{
Schedule: params["schedule"],
ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
JobTemplate: batchv2alpha1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
},
},
}
return &cronJob, nil
}
type CronJobV1Beta1 struct{}
func (CronJobV1Beta1) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "leave-stdin-open", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "restart", Required: false},
{Name: "schedule", Required: true},
{Name: "serviceaccount", Required: false},
}
}
func (CronJobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
leaveStdinOpen, err := generate.GetBool(params, "leave-stdin-open", false)
if err != nil {
return nil, err
}
podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
restartPolicy := v1.RestartPolicy(params["restart"])
if len(restartPolicy) == 0 {
restartPolicy = v1.RestartPolicyNever
}
podSpec.RestartPolicy = restartPolicy
cronJob := batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: batchv1beta1.CronJobSpec{
Schedule: params["schedule"],
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
},
},
}
return &cronJob, nil
}
type BasicReplicationController struct{}
func (BasicReplicationController) ParamNames() []generate.GeneratorParam {
return []generate.GeneratorParam{
{Name: "labels", Required: false},
{Name: "default-name", Required: false},
{Name: "name", Required: true},
{Name: "replicas", Required: true},
{Name: "image", Required: true},
{Name: "image-pull-policy", Required: false},
{Name: "port", Required: false},
{Name: "hostport", Required: false},
{Name: "stdin", Required: false},
{Name: "tty", Required: false},
{Name: "command", Required: false},
{Name: "args", Required: false},
{Name: "env", Required: false},
{Name: "requests", Required: false},
{Name: "limits", Required: false},
{Name: "serviceaccount", Required: false},
}
}
// populateResourceListV1 takes strings of form <resourceName1>=<value1>,<resourceName1>=<value2>
// and returns ResourceList.
func populateResourceListV1(spec string) (v1.ResourceList, error) {
@@ -737,103 +148,6 @@ func HandleResourceRequirementsV1(params map[string]string) (v1.ResourceRequirem
return result, nil
}
// makePodSpec returns PodSpec filled with passed parameters.
func makePodSpec(params map[string]string, name string) (*v1.PodSpec, error) {
stdin, err := generate.GetBool(params, "stdin", false)
if err != nil {
return nil, err
}
tty, err := generate.GetBool(params, "tty", false)
if err != nil {
return nil, err
}
resourceRequirements, err := HandleResourceRequirementsV1(params)
if err != nil {
return nil, err
}
spec := v1.PodSpec{
ServiceAccountName: params["serviceaccount"],
Containers: []v1.Container{
{
Name: name,
Image: params["image"],
Stdin: stdin,
TTY: tty,
Resources: resourceRequirements,
},
},
}
return &spec, nil
}
func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
args, err := getArgs(genericParams)
if err != nil {
return nil, err
}
envs, err := getEnvs(genericParams)
if err != nil {
return nil, err
}
params, err := getParams(genericParams)
if err != nil {
return nil, err
}
name, err := getName(params)
if err != nil {
return nil, err
}
labels, err := getLabels(params, name)
if err != nil {
return nil, err
}
count, err := strconv.Atoi(params["replicas"])
if err != nil {
return nil, err
}
podSpec, err := makePodSpec(params, name)
if err != nil {
return nil, err
}
imagePullPolicy := v1.PullPolicy(params["image-pull-policy"])
if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
return nil, err
}
if err := updatePodPorts(params, podSpec); err != nil {
return nil, err
}
count32 := int32(count)
controller := v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &count32,
Selector: labels,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: *podSpec,
},
},
}
return &controller, nil
}
// updatePodContainers updates PodSpec.Containers with passed parameters.
func updatePodContainers(params map[string]string, args []string, envs []v1.EnvVar, imagePullPolicy v1.PullPolicy, podSpec *v1.PodSpec) error {
if len(args) > 0 {
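Everything deleted from this file generated workload controllers; what survives is BasicPod plus the shared makePodSpec, updatePodContainers, and updatePodPorts helpers. The object the surviving generator produces can be inspected without touching the cluster (output elided):

    kubectl run nginx --image=nginx --port=80 --dry-run -o yaml
    # apiVersion: v1
    # kind: Pod
    # ...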

View File

@@ -20,406 +20,10 @@ import (
"reflect"
"testing"
appsv1beta1 "k8s.io/api/apps/v1beta1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
batchv2alpha1 "k8s.io/api/batch/v2alpha1"
"k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGenerate(t *testing.T) {
one := int32(1)
tests := []struct {
name string
params map[string]interface{}
expected *v1.ReplicationController
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"image-pull-policy": "Always",
"replicas": "1",
"port": "",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
ImagePullPolicy: v1.PullAlways,
},
},
},
},
},
},
},
{
name: "test2",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"port": "",
"env": []string{"a=b", "c=d"},
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
},
},
},
},
},
},
},
{
name: "test3",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"image-pull-policy": "Never",
"replicas": "1",
"port": "",
"args": []string{"bar", "baz", "blah"},
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
ImagePullPolicy: v1.PullNever,
Args: []string{"bar", "baz", "blah"},
},
},
},
},
},
},
},
{
name: "test3",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"port": "",
"args": []string{"bar", "baz", "blah"},
"command": "true",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Command: []string{"bar", "baz", "blah"},
},
},
},
},
},
},
},
{
name: "test4",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"port": "80",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
},
},
},
},
},
},
},
},
},
{
name: "test5",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"image-pull-policy": "IfNotPresent",
"replicas": "1",
"port": "80",
"hostport": "80",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run": "foo"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"run": "foo"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"run": "foo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
ImagePullPolicy: v1.PullIfNotPresent,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
},
},
},
},
},
},
},
{
name: "test6",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"hostport": "80",
},
expected: nil,
expectErr: true,
},
{
name: "test7",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"labels": "foo=bar,baz=blah",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"foo": "bar", "baz": "blah"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
},
},
},
},
},
},
},
{
name: "test8",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"hostport": "80",
},
expected: nil,
expectErr: true,
},
{
name: "test9",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"labels": "foo=bar,baz=blah",
"requests": "cpu100m,memory=100Mi",
},
expected: nil,
expectErr: true,
},
{
name: "test10",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"labels": "foo=bar,baz=blah",
"requests": "cpu=100m&memory=100Mi",
},
expected: nil,
expectErr: true,
},
{
name: "test11",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"labels": "foo=bar,baz=blah",
"requests": "cpu=",
},
expected: nil,
expectErr: true,
},
{
name: "test12",
params: map[string]interface{}{
"name": "foo",
"image": "someimage",
"replicas": "1",
"labels": "foo=bar,baz=blah",
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
},
expected: &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.ReplicationControllerSpec{
Replicas: &one,
Selector: map[string]string{"foo": "bar", "baz": "blah"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
}
generator := BasicReplicationController{}
for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
t.Logf("%d: %#v", i, obj)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*v1.ReplicationController).Spec.Template, tt.expected.Spec.Template) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected.Spec.Template, obj.(*v1.ReplicationController).Spec.Template)
}
})
}
}
func TestGeneratePod(t *testing.T) {
tests := []struct {
name string
@@ -668,499 +272,6 @@ func TestGeneratePod(t *testing.T) {
}
}
func TestGenerateDeployment(t *testing.T) {
three := int32(3)
tests := []struct {
name string
params map[string]interface{}
expected *extensionsv1beta1.Deployment
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"labels": "foo=bar,baz=blah",
"name": "foo",
"replicas": "3",
"image": "someimage",
"image-pull-policy": "Always",
"port": "80",
"hostport": "80",
"stdin": "true",
"command": "true",
"args": []string{"bar", "baz", "blah"},
"env": []string{"a=b", "c=d"},
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
},
expected: &extensionsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: &three,
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar", "baz": "blah"}},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
ImagePullPolicy: v1.PullAlways,
Stdin: true,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
Command: []string{"bar", "baz", "blah"},
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
}
generator := DeploymentV1Beta1{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*extensionsv1beta1.Deployment), tt.expected) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected, obj.(*extensionsv1beta1.Deployment))
}
})
}
}
func TestGenerateAppsDeployment(t *testing.T) {
three := int32(3)
tests := []struct {
name string
params map[string]interface{}
expected *appsv1beta1.Deployment
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"labels": "foo=bar,baz=blah",
"name": "foo",
"replicas": "3",
"image": "someimage",
"image-pull-policy": "Always",
"port": "80",
"hostport": "80",
"stdin": "true",
"command": "true",
"args": []string{"bar", "baz", "blah"},
"env": []string{"a=b", "c=d"},
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
},
expected: &appsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: appsv1beta1.DeploymentSpec{
Replicas: &three,
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar", "baz": "blah"}},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
ImagePullPolicy: v1.PullAlways,
Stdin: true,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
Command: []string{"bar", "baz", "blah"},
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
}
generator := DeploymentAppsV1Beta1{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*appsv1beta1.Deployment), tt.expected) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected, obj.(*appsv1beta1.Deployment))
}
})
}
}
func TestGenerateJob(t *testing.T) {
tests := []struct {
name string
params map[string]interface{}
expected *batchv1.Job
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"labels": "foo=bar,baz=blah",
"name": "foo",
"image": "someimage",
"port": "80",
"hostport": "80",
"stdin": "true",
"leave-stdin-open": "true",
"command": "true",
"args": []string{"bar", "baz", "blah"},
"env": []string{"a=b", "c=d"},
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
"restart": "OnFailure",
},
expected: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Stdin: true,
StdinOnce: false,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
Command: []string{"bar", "baz", "blah"},
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
}
generator := JobV1{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*batchv1.Job), tt.expected) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected, obj.(*batchv1.Job))
}
})
}
}
func TestGenerateCronJobAlpha(t *testing.T) {
tests := []struct {
name string
params map[string]interface{}
expected *batchv2alpha1.CronJob
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"labels": "foo=bar,baz=blah",
"name": "foo",
"image": "someimage",
"port": "80",
"hostport": "80",
"stdin": "true",
"leave-stdin-open": "true",
"command": "true",
"args": []string{"bar", "baz", "blah"},
"env": []string{"a=b", "c=d"},
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
"restart": "OnFailure",
"schedule": "0/5 * * * ?",
},
expected: &batchv2alpha1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: batchv2alpha1.CronJobSpec{
Schedule: "0/5 * * * ?",
ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
JobTemplate: batchv2alpha1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Stdin: true,
StdinOnce: false,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
Command: []string{"bar", "baz", "blah"},
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
},
},
}
generator := CronJobV2Alpha1{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*batchv2alpha1.CronJob), tt.expected) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected, obj.(*batchv2alpha1.CronJob))
}
})
}
}
func TestGenerateCronJobBeta(t *testing.T) {
tests := []struct {
name string
params map[string]interface{}
expected *batchv1beta1.CronJob
expectErr bool
}{
{
name: "test1",
params: map[string]interface{}{
"labels": "foo=bar,baz=blah",
"name": "foo",
"image": "someimage",
"port": "80",
"hostport": "80",
"stdin": "true",
"leave-stdin-open": "true",
"command": "true",
"args": []string{"bar", "baz", "blah"},
"env": []string{"a=b", "c=d"},
"requests": "cpu=100m,memory=100Mi",
"limits": "cpu=400m,memory=200Mi",
"restart": "OnFailure",
"schedule": "0/5 * * * ?",
},
expected: &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "0/5 * * * ?",
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"foo": "bar", "baz": "blah"},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: "foo",
Image: "someimage",
Stdin: true,
StdinOnce: false,
Ports: []v1.ContainerPort{
{
ContainerPort: 80,
HostPort: 80,
},
},
Command: []string{"bar", "baz", "blah"},
Env: []v1.EnvVar{
{
Name: "a",
Value: "b",
},
{
Name: "c",
Value: "d",
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("400m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
},
},
},
},
},
},
},
},
},
},
}
generator := CronJobV1Beta1{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, err := generator.Generate(tt.params)
if !tt.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
if tt.expectErr && err != nil {
return
}
if !reflect.DeepEqual(obj.(*batchv1beta1.CronJob), tt.expected) {
t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", tt.expected, obj.(*batchv1beta1.CronJob))
}
})
}
}
func TestParseEnv(t *testing.T) {
tests := []struct {
name string

View File

@@ -34,7 +34,7 @@ run_job_tests() {
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'
### Create a cronjob in a specific namespace
kubectl run pi --schedule="59 23 31 2 *" --namespace=test-jobs --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: assertion object exists
kube::test::get_object_assert 'cronjob/pi --namespace=test-jobs' "{{$id_field}}" 'pi'
kubectl get cronjob/pi --namespace=test-jobs

View File

@@ -1010,18 +1010,18 @@ __EOF__
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create deployent and service
# Pre-condition: no deployment exists
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod and service
# Pre-condition: no pod exists
kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run testmetadata --image=nginx --replicas=2 --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
kubectl run testmetadata --image=nginx --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
# Check result
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]"
### Expose deployment as a new service
### Expose pod as a new service
# Command
kubectl expose deployment testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
kubectl expose pod testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
# Check result
kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
@@ -1031,7 +1031,7 @@ __EOF__
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
kubectl delete deployment testmetadata "${kube_flags[@]}"
kubectl delete pod testmetadata "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
fi
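The test now exercises --expose against a bare pod: kubectl run creates the pod and, because --expose is set, a service of the same name, with the deprecated --service-overrides still feeding the service object. Roughly (output order is illustrative):

    kubectl run testmetadata --image=nginx --port=80 --expose
    # service/testmetadata created
    # pod/testmetadata created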

View File

@@ -95,10 +95,10 @@ run_create_job_tests() {
# Test kubectl create job from cronjob
# Pre-Condition: create a cronjob
kubectl run test-pi --schedule="* */5 * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(10)'
kubectl create cronjob test-pi --schedule="* */5 * * *" "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(10)'
kubectl create job my-pi --from=cronjob/test-pi
# Post-condition: container args contain expected command
output_message=$(kubectl get job my-pi -o go-template='{{(index .spec.template.spec.containers 0).args}}' "${kube_flags[@]}")
output_message=$(kubectl get job my-pi -o go-template='{{(index .spec.template.spec.containers 0).command}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "perl -Mbignum=bpi -wle print bpi(10)"
# Clean up
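The assertion flips from args to command because the two commands fill different container fields: kubectl run NAME -- ... puts the trailing words into args unless --command is given, while kubectl create job and create cronjob put them into command. A quick way to confirm, with the field path taken from the CronJob schema:

    kubectl create cronjob test-pi --schedule="* */5 * * *" --image=perl -- perl -Mbignum=bpi -wle 'print bpi(10)'
    kubectl get cronjob test-pi -o go-template='{{(index .spec.jobTemplate.spec.template.spec.containers 0).command}}'
    # [perl -Mbignum=bpi -wle print bpi(10)]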

View File

@@ -572,15 +572,6 @@ runTests() {
record_command run_crd_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_cmd_with_img_tests
fi
#####################################
# Recursive Resources via directory #
#####################################

View File

@@ -24,77 +24,20 @@ run_kubectl_run_tests() {
create_and_use_new_namespace
kube::log::status "Testing kubectl run"
## kubectl run should create deployments, jobs or cronjob
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Pre-Condition: no Pod exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1 "--image=${IMAGE_PERL}" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
kubectl run nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]:?}"
# Post-Condition: Pod "nginx" is created
kube::test::get_object_assert pod "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# new generator was used
output_message=$(kubectl get deployment.apps/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '10'
# Clean up
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
# Command
kubectl run nginx-apps "--image=${IMAGE_NGINX}" --generator=deployment/apps.v1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
# and the new generator was used, i.e. new defaults are applied
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '10'
# Clean up
kubectl delete deployment nginx-apps "${kube_flags[@]}"
# Pre-Condition: no Job exists
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=${IMAGE_PERL}" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: CronJob "pi" is created
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Pre-condition: cronjob has perl image, not custom image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_not_string "${output_message}" "custom-image"
kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
# Set cronjob image
kubectl set image cronjob/pi '*=custom-image'
# Post-condition: cronjob has custom image, not perl image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_string "${output_message}" "custom-image"
kube::test::if_has_not_string "${output_message}" "${IMAGE_PERL}"
# Clean up
kubectl delete cronjobs pi "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_cmd_with_img_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing cmd with image"
kubectl delete pod nginx-extensions "${kube_flags[@]}"
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment.apps/test1 created'
kubectl delete deployments test1
kube::test::if_has_string "${output_message}" 'pod/test1 created'
kubectl delete pods test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
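# Aside: the post-removal behaviour pinned down above, as a rough interactive
# session (same image names; error text abridged):
kubectl run test1 --image=validname          # pod/test1 created
kubectl delete pods test1
kubectl run test2 --image=InvalidImageName   # error: Invalid image name ... invalid reference format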


@@ -63,21 +63,21 @@ run_save_config_tests() {
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc nginx -o yaml "${kube_flags[@]}")"
# Pre-Condition: no pods exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the pod "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config "${kube_flags[@]}"
# Post-Condition: pod "nginx" has configuration annotation
grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pod nginx -o yaml "${kube_flags[@]}")"
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
kubectl expose pod nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get svc nginx -o yaml "${kube_flags[@]}")"
# Clean up
kubectl delete rc,svc nginx
kubectl delete pod,svc nginx
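# Aside: before the cleanup above, the annotation written by --save-config can
# also be read back directly with `kubectl apply view-last-applied`, assuming
# that subcommand is present in the kubectl build under test:
kubectl apply view-last-applied pod/nginx   # prints the saved configuration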
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''


@@ -233,14 +233,9 @@ test/e2e/kubectl/kubectl.go: "should update the label on a resource"
test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs"
test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc"
test/e2e/kubectl/kubectl.go: "should check is all data is printed"
test/e2e/kubectl/kubectl.go: "should create an rc or deployment from an image"
test/e2e/kubectl/kubectl.go: "should create an rc from an image"
test/e2e/kubectl/kubectl.go: "should support rolling-update to same image"
test/e2e/kubectl/kubectl.go: "should create a deployment from an image"
test/e2e/kubectl/kubectl.go: "should create a job from an image when restart is OnFailure"
test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never"
test/e2e/kubectl/kubectl.go: "should update a single-container pod's image"
test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the job"
test/e2e/kubectl/kubectl.go: "should support proxy with --port 0"
test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path"
test/e2e/network/dns.go: "should provide DNS for the cluster"


@@ -42,11 +42,9 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/integration/etcd:go_default_library",


@@ -70,11 +70,9 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
"k8s.io/kubernetes/test/integration/etcd"
@@ -102,13 +100,13 @@ const (
pausePodName = "pause"
busyboxPodSelector = "app=busybox1"
busyboxPodName = "busybox1"
runJobTimeout = 5 * time.Minute
kubeCtlManifestPath = "test/e2e/testing-manifests/kubectl"
agnhostControllerFilename = "agnhost-master-controller.json.in"
agnhostServiceFilename = "agnhost-master-service.json"
httpdDeployment1Filename = "httpd-deployment1.yaml.in"
httpdDeployment2Filename = "httpd-deployment2.yaml.in"
httpdDeployment3Filename = "httpd-deployment3.yaml.in"
httpdRCFilename = "httpd-rc.yaml.in"
metaPattern = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)
@@ -209,56 +207,6 @@ func runKubectlRetryOrDie(ns string, args ...string) string {
return output
}
// duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
var _ = SIGDescribe("Kubectl alpha client", func() {
defer ginkgo.GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
var c clientset.Interface
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
ginkgo.Describe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
cjName = "e2e-test-echo-cronjob-alpha"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "cronjobs", cjName, nsFlag)
})
ginkgo.It("should create a CronJob", func() {
e2eskipper.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceAlpha, f.Namespace.Name)
schedule := "*/5 * * * ?"
framework.RunKubectlOrDie(ns, "run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
ginkgo.By("verifying the CronJob " + cjName + " was created")
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err)
}
if sj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
}
})
})
})
var _ = SIGDescribe("Kubectl client", func() {
defer ginkgo.GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
@@ -697,7 +645,7 @@ var _ = SIGDescribe("Kubectl client", func() {
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(gomega.BeNil())
gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test", nil)).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach without stdin")
runOutput = framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
@@ -705,7 +653,8 @@ var _ = SIGDescribe("Kubectl client", func() {
ExecOrDie(ns)
gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(gomega.BeNil())
gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test-2", nil)).To(gomega.BeNil())
ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
runOutput = framework.NewKubectlCommand(ns, nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
@@ -731,7 +680,7 @@ var _ = SIGDescribe("Kubectl client", func() {
})
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(gomega.BeNil())
gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test-3", nil)).To(gomega.BeNil())
})
ginkgo.It("should contain last line of the log", func() {
@@ -739,7 +688,7 @@ var _ = SIGDescribe("Kubectl client", func() {
podName := "run-log-test"
ginkgo.By("executing a command with run")
framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready")
@@ -1467,7 +1416,7 @@ metadata:
ginkgo.By("creating an pod")
nsFlag = fmt.Sprintf("--namespace=%v", ns)
// Agnhost image generates logs for a total of 100 lines over 20s.
framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+agnhostImage, nsFlag, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, nsFlag, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pod", podName, nsFlag)
@@ -1589,112 +1538,18 @@ metadata:
})
})
ginkgo.Describe("Kubectl run default", func() {
var nsFlag string
var name string
var cleanUp func()
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
name = "e2e-test-httpd-deployment"
cleanUp = func() { framework.RunKubectlOrDie(ns, "delete", "deployment", name, nsFlag) }
})
ginkgo.AfterEach(func() {
cleanUp()
})
/*
Release : v1.9
Testname: Kubectl, run default
Description: Command kubectl run MUST create a running pod with possible replicas given an image using the option --image=httpd. The running Pod SHOULD have one container and the container SHOULD be running the image specified in the run command.
*/
framework.ConformanceIt("should create an rc or deployment from an image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", name, "--image="+httpdImage, nsFlag)
ginkgo.By("verifying the pod controlled by " + name + " gets created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by %s: %v", name, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
}
})
})
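# Aside: with the deployment generator removed, the closest replacement for
# the deleted flow above is `kubectl create deployment`; note that it labels
# pods with app=<name> rather than the run=<name> label used by kubectl run:
kubectl create deployment e2e-test-httpd-deployment --image=httpd
kubectl get pods -l app=e2e-test-httpd-deployment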
ginkgo.Describe("Kubectl run rc", func() {
var nsFlag string
var rcName string
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
rcName = "e2e-test-httpd-rc"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "rc", rcName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, run rc
Description: Command kubectl run MUST create a running rc with one replica by default given an image using the option --image=httpd. The running replication controller SHOULD have one container and the container SHOULD be running the image specified in the run command. Also there MUST be 1 pod controlled by this replication controller running 1 container with the image specified. A kubectl logs command MUST return the logs from the container in the replication controller.
*/
framework.ConformanceIt("should create an rc from an image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag)
ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage)
}
ginkgo.By("verifying the pod controlled by rc " + rcName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
}
ginkgo.By("confirm that you can get logs from an rc")
podNames := []string{}
for _, pod := range pods {
podNames = append(podNames, pod.Name)
}
if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
framework.Failf("Pods for rc %s were not ready", rcName)
}
_, err = framework.RunKubectl(ns, "logs", "rc/"+rcName, nsFlag)
// a non-nil error is fine as long as we actually found a pod.
if err != nil && !strings.Contains(err.Error(), " in pod ") {
framework.Failf("Failed getting logs by rc %s: %v", rcName, err)
}
})
})
ginkgo.Describe("Kubectl rolling-update", func() {
var nsFlag string
var rcName string
var httpdRC string
var c clientset.Interface
ginkgo.BeforeEach(func() {
c = f.ClientSet
nsFlag = fmt.Sprintf("--namespace=%v", ns)
rcName = "e2e-test-httpd-rc"
rcName = "httpd-rc"
httpdRC = commonutils.SubstituteImageName(string(readTestFileOrDie(httpdRCFilename)))
})
ginkgo.AfterEach(func() {
@@ -1708,16 +1563,7 @@ metadata:
*/
framework.ConformanceIt("should support rolling-update to same image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", rcName, "--image="+httpdImage, "--generator=run/v1", nsFlag)
ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage)
}
framework.RunKubectlOrDieInput(ns, httpdRC, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
waitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
ginkgo.By("rolling-update to same image controller")
@@ -1728,134 +1574,6 @@ metadata:
})
})
ginkgo.Describe("Kubectl run deployment", func() {
var nsFlag string
var dName string
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
dName = "e2e-test-httpd-deployment"
})
ginkgo.AfterEach(func() {
err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
out, err := framework.RunKubectl(ns, "delete", "deployment", dName, nsFlag)
if err != nil {
if strings.Contains(err.Error(), "could not find default credentials") {
err = nil
}
return false, fmt.Errorf("kubectl delete failed output: %s, err: %v", out, err)
}
return true, nil
})
framework.ExpectNoError(err)
})
/*
Release : v1.9
Testname: Kubectl, run deployment
Description: Command kubectl run MUST create a deployment, with --generator=deployment, when an image name is specified in the run command. After the run command there SHOULD be a deployment that should exist with one container running the specified image. Also there SHOULD be a Pod that is controlled by this deployment, with a container running the specified image.
*/
framework.ConformanceIt("should create a deployment from an image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", dName, "--image="+httpdImage, "--generator=deployment/apps.v1", nsFlag)
ginkgo.By("verifying the deployment " + dName + " was created")
d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", dName, err)
}
containers := d.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, httpdImage)
}
ginkgo.By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie(ns, "get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
}
})
})
ginkgo.Describe("Kubectl run job", func() {
var nsFlag string
var jobName string
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
jobName = "e2e-test-httpd-job"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "jobs", jobName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, run job
Description: Command kubectl run MUST create a job, with --generator=job, when an image name is specified in the run command. After the run command there SHOULD be a job that should exist with one container running the specified image. Also there SHOULD be a restart policy on the job spec that SHOULD match the command line.
*/
framework.ConformanceIt("should create a job from an image when restart is OnFailure ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+httpdImage, nsFlag)
ginkgo.By("verifying the job " + jobName + " was created")
job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, httpdImage, containers)
}
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
}
})
})
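# Aside: the removed job flow maps onto `kubectl create job`; one behavioural
# difference worth noting is the restart policy, which `create job` defaults
# to Never (the generator path above was driven by --restart=OnFailure):
kubectl create job e2e-test-httpd-job --image=httpd
kubectl get job e2e-test-httpd-job -o jsonpath='{.spec.template.spec.restartPolicy}'   # Never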
ginkgo.Describe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
ginkgo.BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
cjName = "e2e-test-echo-cronjob-beta"
})
ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "cronjobs", cjName, nsFlag)
})
ginkgo.It("should create a CronJob", func() {
e2eskipper.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceBeta, f.Namespace.Name)
schedule := "*/5 * * * ?"
framework.RunKubectlOrDie(ns, "run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
ginkgo.By("verifying the CronJob " + cjName + " was created")
cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err)
}
if cj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
}
})
})
ginkgo.Describe("Kubectl run pod", func() {
var nsFlag string
var podName string
@@ -1872,11 +1590,11 @@ metadata:
/*
Release : v1.9
Testname: Kubectl, run pod
Description: Command kubectl run MUST create a pod, with --generator=run-pod, when an image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
Description: Command kubectl run MUST create a pod when an image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
*/
framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+httpdImage, nsFlag)
framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--image="+httpdImage, nsFlag)
ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
@@ -1912,7 +1630,7 @@ metadata:
*/
framework.ConformanceIt("should update a single-container pod's image ", func() {
ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--generator=run-pod/v1", "--image="+httpdImage, "--labels=run="+podName, nsFlag)
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName, nsFlag)
ginkgo.By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
@@ -1943,37 +1661,6 @@ metadata:
})
})
ginkgo.Describe("Kubectl run --rm job", func() {
jobName := "e2e-test-rm-busybox-job"
/*
Release : v1.9
Testname: Kubectl, run job with --rm
Description: Start a job with a Pod using kubectl run but specify --rm=true. Wait for the Pod to start running by verifying that there is output as expected. Now verify that the job has exited and cannot be found. With the --rm=true option the job MUST start by running the specified image and then delete itself.
*/
framework.ConformanceIt("should create a job from an image, then delete the job ", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
ginkgo.By("executing a command with run --rm and attach with stdin")
t := time.NewTimer(runJobTimeout)
defer t.Stop()
runOutput := framework.NewKubectlCommand(ns, nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
WithTimeout(t.C).
ExecOrDie(ns)
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
err := e2ejob.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
framework.ExpectNoError(err)
ginkgo.By("verifying the job " + jobName + " was deleted")
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
framework.ExpectError(err)
framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
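# Aside: --rm itself survives the generator removal for bare pods; a sketch of
# the equivalent pod-based flow (attached mode is required for --rm):
kubectl run e2e-test-rm-busybox --image=busybox --rm --restart=Never --attach --stdin -- sh -c "cat && echo 'stdin closed'"
kubectl get pod e2e-test-rm-busybox   # NotFound once the attached session ends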
ginkgo.Describe("Proxy server", func() {
// TODO: test proxy options (static, prefix, etc)
/*


@@ -0,0 +1,16 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: httpd-rc
spec:
replicas: 1
selector:
run: httpd-rc
template:
metadata:
labels:
run: httpd-rc
spec:
containers:
- image: {{.HttpdNewImage}}
name: httpd-rc
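# Aside: {{.HttpdNewImage}} above is filled in by the e2e framework via
# commonutils.SubstituteImageName before the manifest is piped to
# `kubectl create -f -`; outside the suite the same effect can be sketched
# with sed (the image tag here is an assumption):
#   sed 's|{{.HttpdNewImage}}|httpd:2.4.38-alpine|' httpd-rc.yaml.in | kubectl create -f -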