Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #25816 from soltysh/scheduledjob_kubectl

Automatic merge from submit-queue.

ScheduledJob kubectl

@erictune the last part (kubectl) of the ScheduledJob; as usual it builds on top of the previous PRs, so only the last two commits matter (the kubectl one and the storage leftovers).

```release-note
* Introducing ScheduledJobs as described in [the proposal](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/scheduledjob.md) as part of `batch/v2alpha1` version (experimental feature).
```

This commit is contained in: 7921a9ce67
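For orientation before the diff, here is a rough, hand-written Go sketch of the batch/v2alpha1 ScheduledJob object that the kubectl generator added below ends up building. The name, image, labels, and schedule are invented example values, not taken from the commit.

```go
// Illustrative only: a hand-built ScheduledJob mirroring what the
// scheduledjob/v2alpha1 generator in this PR constructs. Field values
// (name, image, schedule, labels) are made up for the example.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

func main() {
	labels := map[string]string{"run": "pi"}
	sj := batchv2alpha1.ScheduledJob{
		ObjectMeta: v1.ObjectMeta{Name: "pi", Labels: labels},
		Spec: batchv2alpha1.ScheduledJobSpec{
			Schedule:          "* 0/5 * * * ?",
			ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
			JobTemplate: batchv2alpha1.JobTemplateSpec{
				Spec: batchv2alpha1.JobSpec{
					Template: v1.PodTemplateSpec{
						ObjectMeta: v1.ObjectMeta{Labels: labels},
						Spec: v1.PodSpec{
							RestartPolicy: v1.RestartPolicyOnFailure,
							Containers: []v1.Container{{
								Name:    "pi",
								Image:   "perl",
								Command: []string{"perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"},
							}},
						},
					},
				},
			},
		},
	}
	fmt.Printf("%s runs %q on schedule %q\n",
		sj.Name, sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image, sj.Spec.Schedule)
}
```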
@@ -143,6 +143,8 @@ func Run(s *options.APIServer) error {
     storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
         s.StorageConfig, s.DefaultStorageMediaType, api.Codecs,
         genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
+        // FIXME: this GroupVersionResource override should be configurable
+        []unversioned.GroupVersionResource{batch.Resource("scheduledjobs").WithVersion("v2alpha1")},
         master.DefaultAPIResourceConfigSource(), s.RuntimeConfig)
     if err != nil {
         glog.Fatalf("error in initializing storage factory: %s", err)
@@ -71,7 +71,7 @@ func Run(s *genericoptions.ServerRunOptions) error {
     storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
         s.StorageConfig, s.DefaultStorageMediaType, api.Codecs,
         genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
-        resourceConfig, s.RuntimeConfig)
+        []unversioned.GroupVersionResource{}, resourceConfig, s.RuntimeConfig)
     if err != nil {
         glog.Fatalf("error in initializing storage factory: %s", err)
     }
@@ -169,6 +169,14 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
             j.ManualSelector = nil
         }
     },
+    func(sj *batch.ScheduledJobSpec, c fuzz.Continue) {
+        c.FuzzNoCustom(sj)
+        suspend := c.RandBool()
+        sj.Suspend = &suspend
+        sds := int64(c.RandUint64())
+        sj.StartingDeadlineSeconds = &sds
+        sj.Schedule = c.RandString()
+    },
     func(cp *batch.ConcurrencyPolicy, c fuzz.Continue) {
         policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
         *cp = policies[c.Rand.Intn(len(policies))]
@@ -3518,11 +3518,12 @@ func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
         const yyr2 bool = false
         yyq2[1] = x.StartingDeadlineSeconds != nil
         yyq2[2] = x.ConcurrencyPolicy != ""
+        yyq2[3] = x.Suspend != nil
         var yynn2 int
         if yyr2 || yy2arr2 {
             r.EncodeArrayStart(5)
         } else {
-            yynn2 = 3
+            yynn2 = 2
             for _, b := range yyq2 {
                 if b {
                     yynn2++
@@ -3602,30 +3603,36 @@ func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
     }
     if yyr2 || yy2arr2 {
         z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-        if x.Suspend == nil {
-            r.EncodeNil()
-        } else {
-            yy15 := *x.Suspend
-            yym16 := z.EncBinary()
-            _ = yym16
-            if false {
+        if yyq2[3] {
+            if x.Suspend == nil {
+                r.EncodeNil()
             } else {
-                r.EncodeBool(bool(yy15))
+                yy15 := *x.Suspend
+                yym16 := z.EncBinary()
+                _ = yym16
+                if false {
+                } else {
+                    r.EncodeBool(bool(yy15))
+                }
             }
+        } else {
+            r.EncodeNil()
         }
     } else {
-        z.EncSendContainerState(codecSelfer_containerMapKey1234)
-        r.EncodeString(codecSelferC_UTF81234, string("suspend"))
-        z.EncSendContainerState(codecSelfer_containerMapValue1234)
-        if x.Suspend == nil {
-            r.EncodeNil()
-        } else {
-            yy17 := *x.Suspend
-            yym18 := z.EncBinary()
-            _ = yym18
-            if false {
+        if yyq2[3] {
+            z.EncSendContainerState(codecSelfer_containerMapKey1234)
+            r.EncodeString(codecSelferC_UTF81234, string("suspend"))
+            z.EncSendContainerState(codecSelfer_containerMapValue1234)
+            if x.Suspend == nil {
+                r.EncodeNil()
             } else {
-                r.EncodeBool(bool(yy17))
+                yy17 := *x.Suspend
+                yym18 := z.EncBinary()
+                _ = yym18
+                if false {
+                } else {
+                    r.EncodeBool(bool(yy17))
+                }
             }
         }
     }
@@ -209,7 +209,7 @@ type ScheduledJobSpec struct {
 
     // Suspend flag tells the controller to suspend subsequent executions, it does
    // not apply to already started executions. Defaults to false.
-    Suspend *bool `json:"suspend"`
+    Suspend *bool `json:"suspend,omitempty"`
 
     // JobTemplate is the object that describes the job that will be created when
     // executing a ScheduledJob.
@@ -46,4 +46,7 @@ func SetDefaults_ScheduledJob(obj *ScheduledJob) {
     if obj.Spec.ConcurrencyPolicy == "" {
         obj.Spec.ConcurrencyPolicy = AllowConcurrent
     }
+    if obj.Spec.Suspend == nil {
+        obj.Spec.Suspend = new(bool)
+    }
 }
@@ -3494,11 +3494,12 @@ func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
         const yyr2 bool = false
         yyq2[1] = x.StartingDeadlineSeconds != nil
         yyq2[2] = x.ConcurrencyPolicy != ""
+        yyq2[3] = x.Suspend != nil
         var yynn2 int
         if yyr2 || yy2arr2 {
             r.EncodeArrayStart(5)
         } else {
-            yynn2 = 3
+            yynn2 = 2
             for _, b := range yyq2 {
                 if b {
                     yynn2++
@@ -3578,30 +3579,36 @@ func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
     }
     if yyr2 || yy2arr2 {
         z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-        if x.Suspend == nil {
-            r.EncodeNil()
-        } else {
-            yy15 := *x.Suspend
-            yym16 := z.EncBinary()
-            _ = yym16
-            if false {
+        if yyq2[3] {
+            if x.Suspend == nil {
+                r.EncodeNil()
             } else {
-                r.EncodeBool(bool(yy15))
+                yy15 := *x.Suspend
+                yym16 := z.EncBinary()
+                _ = yym16
+                if false {
+                } else {
+                    r.EncodeBool(bool(yy15))
+                }
             }
+        } else {
+            r.EncodeNil()
         }
     } else {
-        z.EncSendContainerState(codecSelfer_containerMapKey1234)
-        r.EncodeString(codecSelferC_UTF81234, string("suspend"))
-        z.EncSendContainerState(codecSelfer_containerMapValue1234)
-        if x.Suspend == nil {
-            r.EncodeNil()
-        } else {
-            yy17 := *x.Suspend
-            yym18 := z.EncBinary()
-            _ = yym18
-            if false {
+        if yyq2[3] {
+            z.EncSendContainerState(codecSelfer_containerMapKey1234)
+            r.EncodeString(codecSelferC_UTF81234, string("suspend"))
+            z.EncSendContainerState(codecSelfer_containerMapValue1234)
+            if x.Suspend == nil {
+                r.EncodeNil()
             } else {
-                r.EncodeBool(bool(yy17))
+                yy17 := *x.Suspend
+                yym18 := z.EncBinary()
+                _ = yym18
+                if false {
+                } else {
+                    r.EncodeBool(bool(yy17))
+                }
             }
         }
     }
@@ -211,7 +211,7 @@ type ScheduledJobSpec struct {
 
     // Suspend flag tells the controller to suspend subsequent executions, it does
    // not apply to already started executions. Defaults to false.
-    Suspend *bool `json:"suspend" protobuf:"varint,4,opt,name=suspend"`
+    Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"`
 
     // JobTemplate is the object that describes the job that will be created when
     // executing a ScheduledJob.
@@ -32,9 +32,11 @@ import (
 // Merges defaultResourceConfig with the user specified overrides and merges
 // defaultAPIResourceConfig with the corresponding user specified overrides as well.
 func BuildDefaultStorageFactory(storageConfig storagebackend.Config, defaultMediaType string, serializer runtime.StorageSerializer,
-    defaultResourceEncoding *DefaultResourceEncodingConfig, storageEncodingOverrides map[string]unversioned.GroupVersion, defaultAPIResourceConfig *ResourceConfig, resourceConfigOverrides config.ConfigurationMap) (*DefaultStorageFactory, error) {
+    defaultResourceEncoding *DefaultResourceEncodingConfig, storageEncodingOverrides map[string]unversioned.GroupVersion, resourceEncodingOverrides []unversioned.GroupVersionResource,
+    defaultAPIResourceConfig *ResourceConfig, resourceConfigOverrides config.ConfigurationMap) (*DefaultStorageFactory, error) {
 
-    resourceEncodingConfig := mergeResourceEncodingConfigs(defaultResourceEncoding, storageEncodingOverrides)
+    resourceEncodingConfig := mergeGroupEncodingConfigs(defaultResourceEncoding, storageEncodingOverrides)
+    resourceEncodingConfig = mergeResourceEncodingConfigs(resourceEncodingConfig, resourceEncodingOverrides)
     apiResourceConfig, err := mergeAPIResourceConfigs(defaultAPIResourceConfig, resourceConfigOverrides)
     if err != nil {
         return nil, err
@@ -42,8 +44,18 @@ func BuildDefaultStorageFactory(storageConfig storagebackend.Config, defaultMedi
     return NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig), nil
 }
 
-// Merges the given defaultAPIResourceConfig with the given storageEncodingOverrides.
-func mergeResourceEncodingConfigs(defaultResourceEncoding *DefaultResourceEncodingConfig, storageEncodingOverrides map[string]unversioned.GroupVersion) *DefaultResourceEncodingConfig {
+// Merges the given defaultResourceConfig with specifc GroupvVersionResource overrides.
+func mergeResourceEncodingConfigs(defaultResourceEncoding *DefaultResourceEncodingConfig, resourceEncodingOverrides []unversioned.GroupVersionResource) *DefaultResourceEncodingConfig {
+    resourceEncodingConfig := defaultResourceEncoding
+    for _, gvr := range resourceEncodingOverrides {
+        resourceEncodingConfig.SetResourceEncoding(gvr.GroupResource(), gvr.GroupVersion(),
+            unversioned.GroupVersion{Group: gvr.Group, Version: runtime.APIVersionInternal})
+    }
+    return resourceEncodingConfig
+}
+
+// Merges the given defaultResourceConfig with specifc GroupVersion overrides.
+func mergeGroupEncodingConfigs(defaultResourceEncoding *DefaultResourceEncodingConfig, storageEncodingOverrides map[string]unversioned.GroupVersion) *DefaultResourceEncodingConfig {
     resourceEncodingConfig := defaultResourceEncoding
     for group, storageEncodingVersion := range storageEncodingOverrides {
         resourceEncodingConfig.SetVersionEncoding(group, storageEncodingVersion, unversioned.GroupVersion{Group: group, Version: runtime.APIVersionInternal})
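The reworked factory above now layers encodings in a fixed order: group defaults, then per-group overrides (mergeGroupEncodingConfigs), then per-resource overrides (mergeResourceEncodingConfigs), which is how the kube-apiserver hunk at the top of this diff pins scheduledjobs to v2alpha1 storage. Below is a self-contained toy model of that precedence; the types are simplified stand-ins, not the real DefaultResourceEncodingConfig.

```go
// Toy model of the encoding-override precedence: group default, then
// per-group override, then per-resource override wins. Types here are
// simplified stand-ins invented for the example.
package main

import "fmt"

type groupResource struct{ group, resource string }

type resourceOverride struct{ group, resource, version string }

type encodingConfig struct {
	groupVersions    map[string]string        // group -> storage version
	resourceVersions map[groupResource]string // specific resource -> storage version
}

// Like mergeGroupEncodingConfigs: apply per-group overrides.
func mergeGroupOverrides(cfg encodingConfig, overrides map[string]string) encodingConfig {
	for group, version := range overrides {
		cfg.groupVersions[group] = version
	}
	return cfg
}

// Like mergeResourceEncodingConfigs: apply per-resource overrides on top.
func mergeResourceOverrides(cfg encodingConfig, overrides []resourceOverride) encodingConfig {
	for _, o := range overrides {
		cfg.resourceVersions[groupResource{o.group, o.resource}] = o.version
	}
	return cfg
}

func (c encodingConfig) storageVersionFor(group, resource string) string {
	if v, ok := c.resourceVersions[groupResource{group, resource}]; ok {
		return v // resource-level override wins
	}
	return c.groupVersions[group]
}

func main() {
	cfg := encodingConfig{
		groupVersions:    map[string]string{"batch": "v1"}, // group default
		resourceVersions: map[groupResource]string{},
	}
	cfg = mergeGroupOverrides(cfg, map[string]string{"batch": "v1"})
	cfg = mergeResourceOverrides(cfg, []resourceOverride{{"batch", "scheduledjobs", "v2alpha1"}})
	fmt.Println("jobs stored as:", cfg.storageVersionFor("batch", "jobs"))                   // v1
	fmt.Println("scheduledjobs stored as:", cfg.storageVersionFor("batch", "scheduledjobs")) // v2alpha1
}
```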
@@ -69,7 +69,10 @@ var (
         kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
 
         # Start the perl container to compute π to 2000 places and print it out.
-        kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'`)
+        kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
+
+        # Start the scheduled job to compute π to 2000 places and print it out every 5 minutes.
+        kubectl run pi --schedule="* 0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'`)
 )
 
 func NewCmdRun(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command {
@@ -118,6 +121,7 @@ func addRunFlags(cmd *cobra.Command) {
     cmd.Flags().String("service-generator", "service/v2", "The name of the generator to use for creating a service. Only used if --expose is true")
     cmd.Flags().String("service-overrides", "", "An inline JSON override for the generated service object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. Only used if --expose is true.")
     cmd.Flags().Bool("quiet", false, "If true, suppress prompt messages.")
+    cmd.Flags().String("schedule", "", "A schedule in the Cron format the job should be run with.")
 }
 
 func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobra.Command, args []string, argsLenAtDash int) error {
@@ -154,6 +158,10 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob
     }
 
     generatorName := cmdutil.GetFlagString(cmd, "generator")
+    schedule := cmdutil.GetFlagString(cmd, "schedule")
+    if len(schedule) != 0 && len(generatorName) == 0 {
+        generatorName = "scheduledjob/v2alpha1"
+    }
     if len(generatorName) == 0 {
         client, err := f.Client()
         if err != nil {
@@ -164,6 +164,7 @@ const (
     DeploymentV1Beta1GeneratorName = "deployment/v1beta1"
     JobV1Beta1GeneratorName = "job/v1beta1"
     JobV1GeneratorName = "job/v1"
+    ScheduledJobV2Alpha1GeneratorName = "scheduledjob/v2alpha1"
     NamespaceV1GeneratorName = "namespace/v1"
     ResourceQuotaV1GeneratorName = "resourcequotas/v1"
     SecretV1GeneratorName = "secret/v1"
@@ -180,11 +181,12 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
         ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{},
     }
     generators["run"] = map[string]kubectl.Generator{
-        RunV1GeneratorName:             kubectl.BasicReplicationController{},
-        RunPodV1GeneratorName:          kubectl.BasicPod{},
-        DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{},
-        JobV1Beta1GeneratorName:        kubectl.JobV1Beta1{},
-        JobV1GeneratorName:             kubectl.JobV1{},
+        RunV1GeneratorName:                kubectl.BasicReplicationController{},
+        RunPodV1GeneratorName:             kubectl.BasicPod{},
+        DeploymentV1Beta1GeneratorName:    kubectl.DeploymentV1Beta1{},
+        JobV1Beta1GeneratorName:           kubectl.JobV1Beta1{},
+        JobV1GeneratorName:                kubectl.JobV1{},
+        ScheduledJobV2Alpha1GeneratorName: kubectl.ScheduledJobV2Alpha1{},
     }
     generators["autoscale"] = map[string]kubectl.Generator{
         HorizontalPodAutoscalerV1Beta1GeneratorName: kubectl.HorizontalPodAutoscalerV1Beta1{},
@@ -109,6 +109,7 @@ func describerMap(c *client.Client) map[unversioned.GroupKind]Describer {
         extensions.Kind("Deployment"): &DeploymentDescriber{adapter.FromUnversionedClient(c)},
         extensions.Kind("Job"): &JobDescriber{c},
         batch.Kind("Job"): &JobDescriber{c},
+        batch.Kind("ScheduledJob"): &ScheduledJobDescriber{adapter.FromUnversionedClient(c)},
         apps.Kind("PetSet"): &PetSetDescriber{c},
         extensions.Kind("Ingress"): &IngressDescriber{c},
     }
@@ -1019,6 +1020,14 @@ func describeStatus(stateName string, state api.ContainerState, out io.Writer) {
     }
 }
 
+func printBoolPtr(value *bool) string {
+    if value != nil {
+        return printBool(*value)
+    }
+
+    return "<unset>"
+}
+
 func printBool(value bool) string {
     if value {
         return "True"
@@ -1148,18 +1157,18 @@ func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, runnin
 
 // JobDescriber generates information about a job and the pods it has created.
 type JobDescriber struct {
-    client *client.Client
+    client.Interface
 }
 
 func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
-    job, err := d.client.Extensions().Jobs(namespace).Get(name)
+    job, err := d.Batch().Jobs(namespace).Get(name)
     if err != nil {
         return "", err
     }
 
     var events *api.EventList
     if describerSettings.ShowEvents {
-        events, _ = d.client.Events(namespace).Search(job)
+        events, _ = d.Events(namespace).Search(job)
     }
 
     return describeJob(job, events)
@@ -1194,6 +1203,92 @@ func describeJob(job *batch.Job, events *api.EventList) (string, error) {
     })
 }
 
+// ScheduledJobDescriber generates information about a scheduled job and the jobs it has created.
+type ScheduledJobDescriber struct {
+    clientset.Interface
+}
+
+func (d *ScheduledJobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
+    scheduledJob, err := d.Batch().ScheduledJobs(namespace).Get(name)
+    if err != nil {
+        return "", err
+    }
+
+    var events *api.EventList
+    if describerSettings.ShowEvents {
+        events, _ = d.Core().Events(namespace).Search(scheduledJob)
+    }
+
+    return describeScheduledJob(scheduledJob, events)
+}
+
+func describeScheduledJob(scheduledJob *batch.ScheduledJob, events *api.EventList) (string, error) {
+    return tabbedString(func(out io.Writer) error {
+        fmt.Fprintf(out, "Name:\t%s\n", scheduledJob.Name)
+        fmt.Fprintf(out, "Namespace:\t%s\n", scheduledJob.Namespace)
+        fmt.Fprintf(out, "Schedule:\t%s\n", scheduledJob.Spec.Schedule)
+        fmt.Fprintf(out, "Concurrency Policy:\t%s\n", scheduledJob.Spec.ConcurrencyPolicy)
+        fmt.Fprintf(out, "Suspend:\t%s\n", printBoolPtr(scheduledJob.Spec.Suspend))
+        if scheduledJob.Spec.StartingDeadlineSeconds != nil {
+            fmt.Fprintf(out, "Starting Deadline Seconds:\t%ds\n", *scheduledJob.Spec.StartingDeadlineSeconds)
+        } else {
+            fmt.Fprintf(out, "Starting Deadline Seconds:\t<unset>\n")
+        }
+        describeJobTemplate(scheduledJob.Spec.JobTemplate, out)
+        printLabelsMultiline(out, "Labels", scheduledJob.Labels)
+        if scheduledJob.Status.LastScheduleTime != nil {
+            fmt.Fprintf(out, "Last Schedule Time:\t%s\n", scheduledJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z))
+        } else {
+            fmt.Fprintf(out, "Last Schedule Time:\t<unset>\n")
+        }
+        printActiveJobs(out, "Active Jobs", scheduledJob.Status.Active)
+        if events != nil {
+            DescribeEvents(events, out)
+        }
+        return nil
+    })
+}
+
+func describeJobTemplate(jobTemplate batch.JobTemplateSpec, out io.Writer) {
+    fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&jobTemplate.Spec.Template.Spec))
+    if jobTemplate.Spec.Selector != nil {
+        selector, _ := unversioned.LabelSelectorAsSelector(jobTemplate.Spec.Selector)
+        fmt.Fprintf(out, "Selector:\t%s\n", selector)
+    } else {
+        fmt.Fprintf(out, "Selector:\t<unset>\n")
+    }
+    if jobTemplate.Spec.Parallelism != nil {
+        fmt.Fprintf(out, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism)
+    } else {
+        fmt.Fprintf(out, "Parallelism:\t<unset>\n")
+    }
+    if jobTemplate.Spec.Completions != nil {
+        fmt.Fprintf(out, "Completions:\t%d\n", *jobTemplate.Spec.Completions)
+    } else {
+        fmt.Fprintf(out, "Completions:\t<unset>\n")
+    }
+    if jobTemplate.Spec.ActiveDeadlineSeconds != nil {
+        fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds)
+    }
+    describeVolumes(jobTemplate.Spec.Template.Spec.Volumes, out, "")
+}
+
+func printActiveJobs(out io.Writer, title string, jobs []api.ObjectReference) {
+    fmt.Fprintf(out, "%s:\t", title)
+    if len(jobs) == 0 {
+        fmt.Fprintln(out, "<none>")
+        return
+    }
+
+    for i, job := range jobs {
+        if i != 0 {
+            fmt.Fprint(out, ", ")
+        }
+        fmt.Fprintf(out, "%s", job.Name)
+    }
+    fmt.Fprintln(out, "")
+}
+
 // DaemonSetDescriber generates information about a daemon set and the pods it has created.
 type DaemonSetDescriber struct {
     client.Interface
@@ -414,6 +414,7 @@ var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLA
 var replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"}
 var replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"}
 var jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"}
+var scheduledJobColumns = []string{"NAME", "SCHEDULE", "SUSPEND", "ACTIVE", "LAST-SCHEDULE"}
 var serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"}
 var ingressColumns = []string{"NAME", "HOSTS", "ADDRESS", "PORTS", "AGE"}
 var petSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"}
@@ -459,6 +460,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
     h.Handler(daemonSetColumns, printDaemonSetList)
     h.Handler(jobColumns, printJob)
     h.Handler(jobColumns, printJobList)
+    h.Handler(scheduledJobColumns, printScheduledJob)
+    h.Handler(scheduledJobColumns, printScheduledJobList)
     h.Handler(serviceColumns, printService)
     h.Handler(serviceColumns, printServiceList)
     h.Handler(ingressColumns, printIngress)
@@ -970,6 +973,42 @@ func printJobList(list *batch.JobList, w io.Writer, options PrintOptions) error
     return nil
 }
 
+func printScheduledJob(scheduledJob *batch.ScheduledJob, w io.Writer, options PrintOptions) error {
+    name := scheduledJob.Name
+    namespace := scheduledJob.Namespace
+
+    if options.WithNamespace {
+        if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
+            return err
+        }
+    }
+
+    lastScheduleTime := "<none>"
+    if scheduledJob.Status.LastScheduleTime != nil {
+        lastScheduleTime = scheduledJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z)
+    }
+    if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n",
+        name,
+        scheduledJob.Spec.Schedule,
+        printBoolPtr(scheduledJob.Spec.Suspend),
+        len(scheduledJob.Status.Active),
+        lastScheduleTime,
+    ); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func printScheduledJobList(list *batch.ScheduledJobList, w io.Writer, options PrintOptions) error {
+    for _, scheduledJob := range list.Items {
+        if err := printScheduledJob(&scheduledJob, w, options); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
 // loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string.
 // `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes.
 func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string {
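For reference, the columns registered above render rows roughly like the sketch below. It is a self-contained approximation that only mirrors printScheduledJob's "%s\t%s\t%s\t%d\t%s\n" format string with made-up values, not output captured from kubectl.

```go
// Approximates the NAME/SCHEDULE/SUSPEND/ACTIVE/LAST-SCHEDULE columns added
// above; the row values are invented sample data, and the real printer walks
// batch.ScheduledJob objects instead of literals.
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	defer w.Flush()

	fmt.Fprintln(w, "NAME\tSCHEDULE\tSUSPEND\tACTIVE\tLAST-SCHEDULE")
	fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n",
		"pi", "* 0/5 * * * ?", "False", 1, "Mon, 30 May 2016 14:45:00 +0000")
}
```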
@@ -27,6 +27,7 @@ import (
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/apis/batch"
     batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+    batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/validation"
@@ -394,6 +395,104 @@ func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, err
     return &job, nil
 }
 
+type ScheduledJobV2Alpha1 struct{}
+
+func (ScheduledJobV2Alpha1) ParamNames() []GeneratorParam {
+    return []GeneratorParam{
+        {"labels", false},
+        {"default-name", false},
+        {"name", true},
+        {"image", true},
+        {"port", false},
+        {"hostport", false},
+        {"stdin", false},
+        {"leave-stdin-open", false},
+        {"tty", false},
+        {"command", false},
+        {"args", false},
+        {"env", false},
+        {"requests", false},
+        {"limits", false},
+        {"restart", false},
+        {"schedule", true},
+    }
+}
+
+func (ScheduledJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
+    args, err := getArgs(genericParams)
+    if err != nil {
+        return nil, err
+    }
+
+    envs, err := getV1Envs(genericParams)
+    if err != nil {
+        return nil, err
+    }
+
+    params, err := getParams(genericParams)
+    if err != nil {
+        return nil, err
+    }
+
+    name, err := getName(params)
+    if err != nil {
+        return nil, err
+    }
+
+    labels, err := getLabels(params, true, name)
+    if err != nil {
+        return nil, err
+    }
+
+    podSpec, err := makeV1PodSpec(params, name)
+    if err != nil {
+        return nil, err
+    }
+
+    if err = updateV1PodContainers(params, args, envs, podSpec); err != nil {
+        return nil, err
+    }
+
+    leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false)
+    if err != nil {
+        return nil, err
+    }
+    podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin
+
+    if err := updateV1PodPorts(params, podSpec); err != nil {
+        return nil, err
+    }
+
+    restartPolicy := v1.RestartPolicy(params["restart"])
+    if len(restartPolicy) == 0 {
+        restartPolicy = v1.RestartPolicyNever
+    }
+    podSpec.RestartPolicy = restartPolicy
+
+    scheduledJob := batchv2alpha1.ScheduledJob{
+        ObjectMeta: v1.ObjectMeta{
+            Name:   name,
+            Labels: labels,
+        },
+        Spec: batchv2alpha1.ScheduledJobSpec{
+            Schedule:          params["schedule"],
+            ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
+            JobTemplate: batchv2alpha1.JobTemplateSpec{
+                Spec: batchv2alpha1.JobSpec{
+                    Template: v1.PodTemplateSpec{
+                        ObjectMeta: v1.ObjectMeta{
+                            Labels: labels,
+                        },
+                        Spec: *podSpec,
+                    },
+                },
+            },
+        },
+    }
+
+    return &scheduledJob, nil
+}
+
 type BasicReplicationController struct{}
 
 func (BasicReplicationController) ParamNames() []GeneratorParam {
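A hedged sketch of driving the new generator directly follows; normally kubectl run selects it through the generators map shown earlier, and every parameter value here is invented for illustration.

```go
// Illustrative use of the scheduledjob/v2alpha1 generator added in this PR.
// The parameter values are made up; only "name", "image", and "schedule"
// are required according to ParamNames above.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubectl"
)

func main() {
	gen := kubectl.ScheduledJobV2Alpha1{}

	params := map[string]interface{}{
		"name":     "pi",
		"image":    "perl",
		"schedule": "* 0/5 * * * ?",
		"restart":  "OnFailure",
		"args":     []string{"perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"},
	}

	obj, err := gen.Generate(params)
	if err != nil {
		fmt.Println("generate failed:", err)
		return
	}
	fmt.Printf("generated %T\n", obj) // expected: *v2alpha1.ScheduledJob
}
```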
@@ -23,6 +23,7 @@ import (
     batchapiv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
     "k8s.io/kubernetes/pkg/genericapiserver"
     jobetcd "k8s.io/kubernetes/pkg/registry/job/etcd"
+    scheduledjobetcd "k8s.io/kubernetes/pkg/registry/scheduledjob/etcd"
 )
 
 type BatchRESTStorageProvider struct{}
@@ -65,5 +66,10 @@ func (p BatchRESTStorageProvider) v2alpha1Storage(apiResourceConfigSource generi
         storage["jobs"] = jobsStorage
         storage["jobs/status"] = jobsStatusStorage
     }
+    if apiResourceConfigSource.ResourceEnabled(version.WithResource("scheduledjobs")) {
+        scheduledJobsStorage, scheduledJobsStatusStorage := scheduledjobetcd.NewREST(restOptionsGetter(batch.Resource("scheduledjobs")))
+        storage["scheduledjobs"] = scheduledJobsStorage
+        storage["scheduledjobs/status"] = scheduledJobsStatusStorage
+    }
     return storage
 }