From 30f5b93d2f7e940f545a1572bcaa281f9b22c10c Mon Sep 17 00:00:00 2001 From: Ananya Kumar Date: Thu, 27 Aug 2015 10:18:16 -0700 Subject: [PATCH 1/3] Add Kubectl support for Daemon --- pkg/kubectl/describe.go | 103 ++++++++++++++++++++++++++++++-- pkg/kubectl/kubectl.go | 1 + pkg/kubectl/resource_printer.go | 57 ++++++++++++++++++ pkg/kubectl/stop.go | 65 ++++++++++++++++++++ 4 files changed, 221 insertions(+), 5 deletions(-) diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 65be81d3d40..2c649010bd0 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" @@ -69,6 +70,7 @@ func describerMap(c *client.Client) map[string]Describer { m := map[string]Describer{ "Pod": &PodDescriber{c}, "ReplicationController": &ReplicationControllerDescriber{c}, + "Daemon": &DaemonDescriber{c}, "Secret": &SecretDescriber{c}, "Service": &ServiceDescriber{c}, "ServiceAccount": &ServiceAccountDescriber{c}, @@ -128,6 +130,7 @@ func init() { describePod, describeService, describeReplicationController, + describeDaemon, describeNode, describeNamespace, ) @@ -423,6 +426,7 @@ type PodDescriber struct { func (d *PodDescriber) Describe(namespace, name string) (string, error) { rc := d.ReplicationControllers(namespace) + dc := d.Daemons(namespace) pc := d.Pods(namespace) pod, err := pc.Get(name) @@ -453,11 +457,15 @@ func (d *PodDescriber) Describe(namespace, name string) (string, error) { if err != nil { return "", err } + daemons, err := getDaemonsForLabels(dc, labels.Set(pod.Labels)) + if err != nil { + return "", err + } - return describePod(pod, rcs, events) + return describePod(pod, rcs, daemons, events) } -func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.EventList) (string, error) { +func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi.Daemon, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pod.Name) fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) @@ -477,6 +485,7 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message) fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP) fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs)) + fmt.Fprintf(out, "Daemons:\t%s\n", printDaemonsByLabels(daemons)) fmt.Fprintf(out, "Containers:\n") describeContainers(pod, out) if len(pod.Status.Conditions) > 0 { @@ -823,7 +832,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string) (strin return "", err } - running, waiting, succeeded, failed, err := getPodStatusForReplicationController(pc, controller) + running, waiting, succeeded, failed, err := getPodStatusForController(pc, controller.Spec.Selector) if err != nil { return "", err } @@ -896,6 +905,52 @@ func describeJob(job *experimental.Job, events *api.EventList) (string, error) { }) } +// DaemonDescriber generates information about a daemon and the pods it has created. 
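+// It implements the Describer interface: Describe looks up the daemon,
+// tallies the phases of the pods matched by its selector, fetches related
+// events, and renders everything with tabbedString like the other describers.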
+type DaemonDescriber struct { + client.Interface +} + +func (d *DaemonDescriber) Describe(namespace, name string) (string, error) { + dc := d.Daemons(namespace) + pc := d.Pods(namespace) + + daemon, err := dc.Get(name) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, daemon.Spec.Selector) + if err != nil { + return "", err + } + + events, _ := d.Events(namespace).Search(daemon) + + return describeDaemon(daemon, events, running, waiting, succeeded, failed) +} + +func describeDaemon(daemon *expapi.Daemon, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", daemon.Name) + if daemon.Spec.Template != nil { + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec)) + } else { + fmt.Fprintf(out, "Image(s):\t%s\n", "") + } + fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Selector)) + fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) + fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(daemon.Labels)) + fmt.Fprintf(out, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) + fmt.Fprintf(out, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) + fmt.Fprintf(out, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + // SecretDescriber generates information about a secret type SecretDescriber struct { client.Interface @@ -1347,6 +1402,30 @@ func DescribeEvents(el *api.EventList, w io.Writer) { } } +// Get all daemons whose selectors would match a given set of labels. +// TODO: Move this to pkg/client and ideally implement it server-side (instead +// of getting all RC's and searching through them manually). +// TODO: write an interface for controllers and fuse getReplicationControllersForLabels +// and getDaemonsForLabels. +func getDaemonsForLabels(c client.DaemonInterface, labelsToMatch labels.Labels) ([]expapi.Daemon, error) { + // Get all daemon controllers. + // TODO this needs a namespace scope as argument + daemons, err := c.List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("error getting daemons: %v", err) + } + + // Find the ones that match labelsToMatch. + var matchingDaemons []expapi.Daemon + for _, daemon := range daemons.Items { + selector := labels.SelectorFromSet(daemon.Spec.Selector) + if selector.Matches(labelsToMatch) { + matchingDaemons = append(matchingDaemons, daemon) + } + } + return matchingDaemons, nil +} + // Get all replication controllers whose selectors would match a given set of // labels. // TODO Move this to pkg/client and ideally implement it server-side (instead @@ -1370,6 +1449,20 @@ func getReplicationControllersForLabels(c client.ReplicationControllerInterface, return matchingRCs, nil } +func printDaemonsByLabels(matchingDaemons []expapi.Daemon) string { + // Format the matching RC's into strings. 
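+ // Each entry renders as "name (desired, scheduled, misscheduled)", e.g.
+ // "ds-example (3 desired, 3 nodes scheduled, 0 nodes misscheduled)".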
+ var daemonStrings []string + for _, daemon := range matchingDaemons { + daemonStrings = append(daemonStrings, fmt.Sprintf("%s (%d desired, %d nodes scheduled, %d nodes misscheduled)", daemon.Name, daemon.Status.DesiredNumberScheduled, daemon.Status.CurrentNumberScheduled, daemon.Status.NumberMisscheduled)) + } + + list := strings.Join(daemonStrings, ", ") + if list == "" { + return "" + } + return list +} + func printReplicationControllersByLabels(matchingRCs []api.ReplicationController) string { // Format the matching RC's into strings. var rcStrings []string @@ -1384,8 +1477,8 @@ func printReplicationControllersByLabels(matchingRCs []api.ReplicationController return list } -func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) { - rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything()) +func getPodStatusForController(c client.PodInterface, selector map[string]string) (running, waiting, succeeded, failed int, err error) { + rcPods, err := c.List(labels.SelectorFromSet(selector), fields.Everything()) if err != nil { return } diff --git a/pkg/kubectl/kubectl.go b/pkg/kubectl/kubectl.go index 15c9c0d6cb4..a6fe0f743c4 100644 --- a/pkg/kubectl/kubectl.go +++ b/pkg/kubectl/kubectl.go @@ -105,6 +105,7 @@ func expandResourceShortcut(resource string) string { "pvc": "persistentvolumeclaims", "quota": "resourcequotas", "rc": "replicationcontrollers", + "dm": "daemons", "svc": "services", } if expanded, ok := shortForms[resource]; ok { diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 81f095cd01f..20a0aaba9bc 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -384,6 +384,7 @@ var jobColumns = []string{"JOB", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "SUCCES var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"} var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"} +var daemonColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"} var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"} var limitRangeColumns = []string{"NAME", "AGE"} var resourceQuotaColumns = []string{"NAME", "AGE"} @@ -406,6 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(podTemplateColumns, printPodTemplateList) h.Handler(replicationControllerColumns, printReplicationController) h.Handler(replicationControllerColumns, printReplicationControllerList) + h.Handler(daemonColumns, printDaemon) + h.Handler(daemonColumns, printDaemonList) h.Handler(jobColumns, printJob) h.Handler(jobColumns, printJobList) h.Handler(serviceColumns, printService) @@ -810,6 +813,60 @@ func printServiceList(list *api.ServiceList, w io.Writer, withNamespace bool, wi return nil } +func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + name := daemon.Name + namespace := daemon.Namespace + + containers := daemon.Spec.Template.Spec.Containers + var firstContainer api.Container + if len(containers) > 0 { + firstContainer, containers = containers[0], containers[1:] + } + + if withNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + name, + 
firstContainer.Name, + firstContainer.Image, + labels.FormatLabels(daemon.Spec.Selector), + labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector), + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, appendLabels(daemon.Labels, columnLabels)); err != nil { + return err + } + + // Lay out all the other containers on separate lines. + extraLinePrefix := "\t" + if withNamespace { + extraLinePrefix = "\t\t" + } + for _, container := range containers { + _, err := fmt.Fprintf(w, "%s%s\t%s\t%s\t%s", extraLinePrefix, container.Name, container.Image, "", "") + if err != nil { + return err + } + if _, err := fmt.Fprint(w, appendLabelTabs(columnLabels)); err != nil { + return err + } + } + return nil +} + +func printDaemonList(list *expapi.DaemonList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + for _, daemon := range list.Items { + if err := printDaemon(&daemon, w, withNamespace, wide, showAll, columnLabels); err != nil { + return err + } + } + return nil +} + func printEndpoints(endpoints *api.Endpoints, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { name := endpoints.Name namespace := endpoints.Namespace diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index bca8ff277d9..490e98ccdb1 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -21,10 +21,13 @@ import ( "strings" "time" + fuzz "github.com/google/gofuzz" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util/wait" ) const ( @@ -56,6 +59,8 @@ func ReaperFor(kind string, c client.Interface) (Reaper, error) { switch kind { case "ReplicationController": return &ReplicationControllerReaper{c, Interval, Timeout}, nil + case "Daemon": + return &DaemonReaper{c, Interval, Timeout}, nil case "Pod": return &PodReaper{c}, nil case "Service": @@ -72,6 +77,10 @@ type ReplicationControllerReaper struct { client.Interface pollInterval, timeout time.Duration } +type DaemonReaper struct { + client.Interface + pollInterval, timeout time.Duration +} type PodReaper struct { client.Interface } @@ -167,6 +176,62 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout return fmt.Sprintf("%s stopped", name), nil } +func (reaper *DaemonReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) { + // Retrieve the daemon we want to stop. + daemonClient := reaper.Daemons(namespace) + daemon, err := daemonClient.Get(name) + if err != nil { + return "", err + } + + // Update the daemon to select for a non-existent NodeName. + // The daemon manager will then kill all the daemon pods corresponding to daemon daemon. + nodes, err := reaper.Nodes().List(labels.Everything(), fields.Everything()) + if err != nil { + return "", err + } + var fuzzer = fuzz.New() + var nameExists bool + numRetries := 1 + for try := 0; try <= numRetries; try++ { + var nodeName string + fuzzer.Fuzz(&nodeName) + nameExists = false + for _, node := range nodes.Items { + nameExists = nameExists || node.Name == nodeName + } + if !nameExists { + daemon.Spec.Template.Spec.NodeName = nodeName + break + } + } + if nameExists { + // Probability of reaching here is extremely low, most likely indicates a programming bug/library error. 
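+ // A fuzzed name would have to collide with an existing node name on every
+ // one of the tries above for this branch to be taken.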
+ return "", fmt.Errorf("Failed to stop node.") + } + daemonClient.Update(daemon) + + // Wait for the daemon manager to kill all the daemon's daemon pods. + daemonPodsKilled := func() (bool, error) { + updatedDc, err := daemonClient.Get(name) + if err != nil { + // We don't return an error, because returning an error will abort wait.Poll, but + // if there's an error, we want to try getting the daemon again. + return false, nil + } + return updatedDc.Status.CurrentNumberScheduled+updatedDc.Status.NumberMisscheduled == 0, nil + } + if err := wait.Poll(reaper.pollInterval, reaper.timeout, daemonPodsKilled); err != nil { + return "", err + } + + // Finally, kill the daemon. + if err := daemonClient.Delete(name); err != nil { + return "", err + } + return fmt.Sprintf("%s stopped", name), nil +} + func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) { pods := reaper.Pods(namespace) _, err := pods.Get(name) From 54b0faf39a66870c7f8fefa66ba7796fbe7878be Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Sat, 12 Sep 2015 09:46:10 -0700 Subject: [PATCH 2/3] rebase onto head --- contrib/completions/bash/kubectl | 4 ++ pkg/kubectl/cmd/cmd.go | 1 + pkg/kubectl/describe.go | 67 +++++++++++--------------------- pkg/kubectl/kubectl.go | 2 +- pkg/kubectl/resource_printer.go | 28 ++++++------- pkg/kubectl/stop.go | 57 ++++++++++++--------------- test/e2e/daemon_set.go | 2 +- 7 files changed, 68 insertions(+), 93 deletions(-) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index a8c9c79e4e7..5295dc6b33b 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -287,6 +287,7 @@ _kubectl_get() must_have_one_flag=() must_have_one_noun=() must_have_one_noun+=("componentstatus") + must_have_one_noun+=("daemonset") must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") @@ -328,6 +329,7 @@ _kubectl_describe() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("daemonset") must_have_one_noun+=("limitrange") must_have_one_noun+=("minion") must_have_one_noun+=("namespace") @@ -456,6 +458,7 @@ _kubectl_delete() must_have_one_flag=() must_have_one_noun=() must_have_one_noun+=("componentstatus") + must_have_one_noun+=("daemonset") must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") @@ -827,6 +830,7 @@ _kubectl_label() must_have_one_flag=() must_have_one_noun=() must_have_one_noun+=("componentstatus") + must_have_one_noun+=("daemonset") must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 3865de98271..3e40a225820 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -112,6 +112,7 @@ __custom_func() { valid_resources = `Valid resource types include: * pods (aka 'po') * replicationcontrollers (aka 'rc') + * daemonsets (aka 'ds') * services (aka 'svc') * events (aka 'ev') * nodes (aka 'no') diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 2c649010bd0..6a99355758c 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" @@ -70,7 +69,7 @@ 
func describerMap(c *client.Client) map[string]Describer { m := map[string]Describer{ "Pod": &PodDescriber{c}, "ReplicationController": &ReplicationControllerDescriber{c}, - "Daemon": &DaemonDescriber{c}, + "DaemonSet": &DaemonSetDescriber{c}, "Secret": &SecretDescriber{c}, "Service": &ServiceDescriber{c}, "ServiceAccount": &ServiceAccountDescriber{c}, @@ -130,7 +129,7 @@ func init() { describePod, describeService, describeReplicationController, - describeDaemon, + describeDaemonSet, describeNode, describeNamespace, ) @@ -426,7 +425,6 @@ type PodDescriber struct { func (d *PodDescriber) Describe(namespace, name string) (string, error) { rc := d.ReplicationControllers(namespace) - dc := d.Daemons(namespace) pc := d.Pods(namespace) pod, err := pc.Get(name) @@ -457,15 +455,11 @@ func (d *PodDescriber) Describe(namespace, name string) (string, error) { if err != nil { return "", err } - daemons, err := getDaemonsForLabels(dc, labels.Set(pod.Labels)) - if err != nil { - return "", err - } - return describePod(pod, rcs, daemons, events) + return describePod(pod, rcs, events) } -func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi.Daemon, events *api.EventList) (string, error) { +func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pod.Name) fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) @@ -485,7 +479,6 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message) fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP) fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs)) - fmt.Fprintf(out, "Daemons:\t%s\n", printDaemonsByLabels(daemons)) fmt.Fprintf(out, "Containers:\n") describeContainers(pod, out) if len(pod.Status.Conditions) > 0 { @@ -905,13 +898,13 @@ func describeJob(job *experimental.Job, events *api.EventList) (string, error) { }) } -// DaemonDescriber generates information about a daemon and the pods it has created. -type DaemonDescriber struct { +// DaemonSetDescriber generates information about a daemon set and the pods it has created. +type DaemonSetDescriber struct { client.Interface } -func (d *DaemonDescriber) Describe(namespace, name string) (string, error) { - dc := d.Daemons(namespace) +func (d *DaemonSetDescriber) Describe(namespace, name string) (string, error) { + dc := d.Experimental().DaemonSets(namespace) pc := d.Pods(namespace) daemon, err := dc.Get(name) @@ -926,10 +919,10 @@ func (d *DaemonDescriber) Describe(namespace, name string) (string, error) { events, _ := d.Events(namespace).Search(daemon) - return describeDaemon(daemon, events, running, waiting, succeeded, failed) + return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) } -func describeDaemon(daemon *expapi.Daemon, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeDaemonSet(daemon *experimental.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", daemon.Name) if daemon.Spec.Template != nil { @@ -1402,28 +1395,28 @@ func DescribeEvents(el *api.EventList, w io.Writer) { } } -// Get all daemons whose selectors would match a given set of labels. +// Get all daemon set whose selectors would match a given set of labels. 
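+// A daemon set matches when labels.SelectorFromSet(ds.Spec.Selector)
+// selects the given labels.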
// TODO: Move this to pkg/client and ideally implement it server-side (instead -// of getting all RC's and searching through them manually). +// of getting all DS's and searching through them manually). // TODO: write an interface for controllers and fuse getReplicationControllersForLabels -// and getDaemonsForLabels. -func getDaemonsForLabels(c client.DaemonInterface, labelsToMatch labels.Labels) ([]expapi.Daemon, error) { - // Get all daemon controllers. - // TODO this needs a namespace scope as argument - daemons, err := c.List(labels.Everything()) +// and getDaemonSetsForLabels. +func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.Labels) ([]experimental.DaemonSet, error) { + // Get all daemon sets + // TODO: this needs a namespace scope as argument + dss, err := c.List(labels.Everything()) if err != nil { - return nil, fmt.Errorf("error getting daemons: %v", err) + return nil, fmt.Errorf("error getting daemon set: %v", err) } // Find the ones that match labelsToMatch. - var matchingDaemons []expapi.Daemon - for _, daemon := range daemons.Items { - selector := labels.SelectorFromSet(daemon.Spec.Selector) + var matchingDaemonSets []experimental.DaemonSet + for _, ds := range dss.Items { + selector := labels.SelectorFromSet(ds.Spec.Selector) if selector.Matches(labelsToMatch) { - matchingDaemons = append(matchingDaemons, daemon) + matchingDaemonSets = append(matchingDaemonSets, ds) } } - return matchingDaemons, nil + return matchingDaemonSets, nil } // Get all replication controllers whose selectors would match a given set of @@ -1449,20 +1442,6 @@ func getReplicationControllersForLabels(c client.ReplicationControllerInterface, return matchingRCs, nil } -func printDaemonsByLabels(matchingDaemons []expapi.Daemon) string { - // Format the matching RC's into strings. - var daemonStrings []string - for _, daemon := range matchingDaemons { - daemonStrings = append(daemonStrings, fmt.Sprintf("%s (%d desired, %d nodes scheduled, %d nodes misscheduled)", daemon.Name, daemon.Status.DesiredNumberScheduled, daemon.Status.CurrentNumberScheduled, daemon.Status.NumberMisscheduled)) - } - - list := strings.Join(daemonStrings, ", ") - if list == "" { - return "" - } - return list -} - func printReplicationControllersByLabels(matchingRCs []api.ReplicationController) string { // Format the matching RC's into strings. 
var rcStrings []string diff --git a/pkg/kubectl/kubectl.go b/pkg/kubectl/kubectl.go index a6fe0f743c4..5f6a8f6dc7c 100644 --- a/pkg/kubectl/kubectl.go +++ b/pkg/kubectl/kubectl.go @@ -105,7 +105,7 @@ func expandResourceShortcut(resource string) string { "pvc": "persistentvolumeclaims", "quota": "resourcequotas", "rc": "replicationcontrollers", - "dm": "daemons", + "ds": "daemonsets", "svc": "services", } if expanded, ok := shortForms[resource]; ok { diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 20a0aaba9bc..1dac87dd347 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -384,7 +384,7 @@ var jobColumns = []string{"JOB", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "SUCCES var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"} var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"} -var daemonColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"} +var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"} var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"} var limitRangeColumns = []string{"NAME", "AGE"} var resourceQuotaColumns = []string{"NAME", "AGE"} @@ -407,8 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(podTemplateColumns, printPodTemplateList) h.Handler(replicationControllerColumns, printReplicationController) h.Handler(replicationControllerColumns, printReplicationControllerList) - h.Handler(daemonColumns, printDaemon) - h.Handler(daemonColumns, printDaemonList) + h.Handler(daemonSetColumns, printDaemonSet) + h.Handler(daemonSetColumns, printDaemonSetList) h.Handler(jobColumns, printJob) h.Handler(jobColumns, printJobList) h.Handler(serviceColumns, printService) @@ -813,11 +813,11 @@ func printServiceList(list *api.ServiceList, w io.Writer, withNamespace bool, wi return nil } -func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { - name := daemon.Name - namespace := daemon.Namespace +func printDaemonSet(ds *experimental.DaemonSet, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + name := ds.Name + namespace := ds.Namespace - containers := daemon.Spec.Template.Spec.Containers + containers := ds.Spec.Template.Spec.Containers var firstContainer api.Container if len(containers) > 0 { firstContainer, containers = containers[0], containers[1:] @@ -828,16 +828,16 @@ func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bo return err } } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, firstContainer.Name, firstContainer.Image, - labels.FormatLabels(daemon.Spec.Selector), - labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector), + labels.FormatLabels(ds.Spec.Selector), + labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector), ); err != nil { return err } - if _, err := fmt.Fprint(w, appendLabels(daemon.Labels, columnLabels)); err != nil { + if _, err := fmt.Fprint(w, appendLabels(ds.Labels, columnLabels)); err != nil { return err } @@ -858,9 +858,9 @@ func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bo return nil } -func printDaemonList(list *expapi.DaemonList, w io.Writer, withNamespace bool, wide bool, showAll bool, 
columnLabels []string) error { - for _, daemon := range list.Items { - if err := printDaemon(&daemon, w, withNamespace, wide, showAll, columnLabels); err != nil { +func printDaemonSetList(list *experimental.DaemonSetList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + for _, ds := range list.Items { + if err := printDaemonSet(&ds, w, withNamespace, wide, showAll, columnLabels); err != nil { return err } } diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index 490e98ccdb1..fa4a371c659 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -59,8 +59,8 @@ func ReaperFor(kind string, c client.Interface) (Reaper, error) { switch kind { case "ReplicationController": return &ReplicationControllerReaper{c, Interval, Timeout}, nil - case "Daemon": - return &DaemonReaper{c, Interval, Timeout}, nil + case "DaemonSet": + return &DaemonSetReaper{c, Interval, Timeout}, nil case "Pod": return &PodReaper{c}, nil case "Service": @@ -77,7 +77,7 @@ type ReplicationControllerReaper struct { client.Interface pollInterval, timeout time.Duration } -type DaemonReaper struct { +type DaemonSetReaper struct { client.Interface pollInterval, timeout time.Duration } @@ -176,57 +176,48 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout return fmt.Sprintf("%s stopped", name), nil } -func (reaper *DaemonReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) { - // Retrieve the daemon we want to stop. - daemonClient := reaper.Daemons(namespace) - daemon, err := daemonClient.Get(name) +func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) { + daemon, err := reaper.Experimental().DaemonSets(namespace).Get(name) if err != nil { return "", err } - // Update the daemon to select for a non-existent NodeName. - // The daemon manager will then kill all the daemon pods corresponding to daemon daemon. + // Update the daemon set to select for a non-existent NodeName. + // The daemon set controller will then kill all the daemon pods corresponding to daemon set. nodes, err := reaper.Nodes().List(labels.Everything(), fields.Everything()) if err != nil { return "", err } var fuzzer = fuzz.New() var nameExists bool - numRetries := 1 - for try := 0; try <= numRetries; try++ { - var nodeName string - fuzzer.Fuzz(&nodeName) - nameExists = false - for _, node := range nodes.Items { - nameExists = nameExists || node.Name == nodeName - } - if !nameExists { - daemon.Spec.Template.Spec.NodeName = nodeName - break - } + + var nodeName string + fuzzer.Fuzz(&nodeName) + nameExists = false + for _, node := range nodes.Items { + nameExists = nameExists || node.Name == nodeName } if nameExists { // Probability of reaching here is extremely low, most likely indicates a programming bug/library error. - return "", fmt.Errorf("Failed to stop node.") + return "", fmt.Errorf("Name collision generating an unused node name. Please retry this operation.") } - daemonClient.Update(daemon) - // Wait for the daemon manager to kill all the daemon's daemon pods. - daemonPodsKilled := func() (bool, error) { - updatedDc, err := daemonClient.Get(name) + daemon.Spec.Template.Spec.NodeName = nodeName + + reaper.Experimental().DaemonSets(namespace).Update(daemon) + + // Wait for the daemon set controller to kill all the daemon pods. 
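+ // wait.Poll runs the condition func every pollInterval until it returns
+ // true or an error, or until the timeout expires. Returning (false, nil)
+ // on a failed Get keeps the poll retrying instead of aborting it.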
+ if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) { + updatedDS, err := reaper.Experimental().DaemonSets(namespace).Get(name) if err != nil { - // We don't return an error, because returning an error will abort wait.Poll, but - // if there's an error, we want to try getting the daemon again. return false, nil } - return updatedDc.Status.CurrentNumberScheduled+updatedDc.Status.NumberMisscheduled == 0, nil - } - if err := wait.Poll(reaper.pollInterval, reaper.timeout, daemonPodsKilled); err != nil { + return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil + }); err != nil { return "", err } - // Finally, kill the daemon. - if err := daemonClient.Delete(name); err != nil { + if err := reaper.Experimental().DaemonSets(namespace).Delete(name); err != nil { return "", err } return fmt.Sprintf("%s stopped", name), nil diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index 05f412768ce..43cf4fb83bb 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -210,7 +210,7 @@ func testDaemonSets(f *Framework) { err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name})) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes") - By("remove the node selector and wait for") + By("remove the node selector and wait for daemons to be unscheduled") newNode, err = nodeClient.Get(newNode.Name) Expect(err).NotTo(HaveOccurred(), "error getting node") newNode.Labels = map[string]string{} From 52ccf54dab9aa831c15d843857b307fc0d484480 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Thu, 27 Aug 2015 10:18:21 -0700 Subject: [PATCH 3/3] Add daemon reaper test to kubectl --- test/e2e/daemon_set.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go index 43cf4fb83bb..727959a830f 100644 --- a/test/e2e/daemon_set.go +++ b/test/e2e/daemon_set.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/apis/experimental" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" @@ -148,6 +149,15 @@ func testDaemonSets(f *Framework) { }, }) Expect(err).NotTo(HaveOccurred()) + defer func() { + Logf("Check that reaper kills all daemon pods for %s", simpleDSName) + dsReaper, err := kubectl.ReaperFor("DaemonSet", c) + Expect(err).NotTo(HaveOccurred()) + _, err = dsReaper.Stop(ns, simpleDSName, 0, nil) + Expect(err).NotTo(HaveOccurred()) + err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label)) + Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped") + }() By("Check that daemon pods launch on every node of the cluster.") Expect(err).NotTo(HaveOccurred()) @@ -218,4 +228,7 @@ func testDaemonSets(f *Framework) { Expect(err).NotTo(HaveOccurred()) Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))). NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes") + + By("We should now be able to delete the daemon set.") + Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred()) }
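
For reference, the teardown flow the new e2e check exercises can also be driven directly from Go. Below is a minimal sketch, assuming an already-configured client.Interface and an existing daemon set, and using only APIs shown in these patches (kubectl.ReaperFor and Reaper.Stop); the package and helper name stopDaemonSet are illustrative, not part of the patches:

    package daemonexample

    import (
    	"fmt"
    	"time"

    	client "k8s.io/kubernetes/pkg/client/unversioned"
    	"k8s.io/kubernetes/pkg/kubectl"
    )

    // stopDaemonSet tears down a daemon set the way kubectl's reaper does:
    // pick the Reaper registered for the kind, then let Stop drain and delete it.
    func stopDaemonSet(c client.Interface, ns, name string, timeout time.Duration) error {
    	reaper, err := kubectl.ReaperFor("DaemonSet", c)
    	if err != nil {
    		return err
    	}
    	// Stop points the daemon set at an unused node name, polls until
    	// CurrentNumberScheduled+NumberMisscheduled reaches zero, then deletes it.
    	msg, err := reaper.Stop(ns, name, timeout, nil)
    	if err != nil {
    		return err
    	}
    	fmt.Println(msg)
    	return nil
    }

This mirrors the deferred cleanup added to test/e2e/daemon_set.go in patch 3, which calls kubectl.ReaperFor("DaemonSet", c) and then dsReaper.Stop(ns, simpleDSName, 0, nil) before verifying that no daemon pods remain.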