Merge pull request #13183 from mikedanese/daemon_kubectl

Add daemonset support to kubectl
Mike Danese 2015-09-16 19:02:36 -07:00
commit 77a06cbf05
7 changed files with 208 additions and 4 deletions
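In practical terms, the change plumbs daemon sets through the standard kubectl verbs: shell completion nouns, the "ds" resource shortcut, a describer, printer columns, and a reaper used for deletion. It enables invocations such as kubectl get ds, kubectl describe daemonset my-daemon, kubectl label daemonset my-daemon tier=node, and kubectl delete daemonset my-daemon (the resource name and label here are placeholders, not taken from this commit).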


@@ -287,6 +287,7 @@ _kubectl_get()
    must_have_one_flag=()
    must_have_one_noun=()
    must_have_one_noun+=("componentstatus")
+   must_have_one_noun+=("daemonset")
    must_have_one_noun+=("deployment")
    must_have_one_noun+=("endpoints")
    must_have_one_noun+=("event")
@@ -328,6 +329,7 @@ _kubectl_describe()
    must_have_one_flag=()
    must_have_one_noun=()
+   must_have_one_noun+=("daemonset")
    must_have_one_noun+=("limitrange")
    must_have_one_noun+=("minion")
    must_have_one_noun+=("namespace")
@@ -456,6 +458,7 @@ _kubectl_delete()
    must_have_one_flag=()
    must_have_one_noun=()
    must_have_one_noun+=("componentstatus")
+   must_have_one_noun+=("daemonset")
    must_have_one_noun+=("deployment")
    must_have_one_noun+=("endpoints")
    must_have_one_noun+=("event")
@@ -827,6 +830,7 @@ _kubectl_label()
    must_have_one_flag=()
    must_have_one_noun=()
    must_have_one_noun+=("componentstatus")
+   must_have_one_noun+=("daemonset")
    must_have_one_noun+=("deployment")
    must_have_one_noun+=("endpoints")
    must_have_one_noun+=("event")


@@ -112,6 +112,7 @@ __custom_func() {
valid_resources = `Valid resource types include:
  * pods (aka 'po')
  * replicationcontrollers (aka 'rc')
+  * daemonsets (aka 'ds')
  * services (aka 'svc')
  * events (aka 'ev')
  * nodes (aka 'no')


@@ -69,6 +69,7 @@ func describerMap(c *client.Client) map[string]Describer {
	m := map[string]Describer{
		"Pod": &PodDescriber{c},
		"ReplicationController": &ReplicationControllerDescriber{c},
+		"DaemonSet": &DaemonSetDescriber{c},
		"Secret": &SecretDescriber{c},
		"Service": &ServiceDescriber{c},
		"ServiceAccount": &ServiceAccountDescriber{c},
@@ -128,6 +129,7 @@ func init() {
		describePod,
		describeService,
		describeReplicationController,
+		describeDaemonSet,
		describeNode,
		describeNamespace,
	)
@@ -823,7 +825,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string) (strin
		return "", err
	}
-	running, waiting, succeeded, failed, err := getPodStatusForReplicationController(pc, controller)
+	running, waiting, succeeded, failed, err := getPodStatusForController(pc, controller.Spec.Selector)
	if err != nil {
		return "", err
	}
@@ -896,6 +898,52 @@ func describeJob(job *experimental.Job, events *api.EventList) (string, error) {
	})
}

+// DaemonSetDescriber generates information about a daemon set and the pods it has created.
+type DaemonSetDescriber struct {
+	client.Interface
+}
+
+func (d *DaemonSetDescriber) Describe(namespace, name string) (string, error) {
+	dc := d.Experimental().DaemonSets(namespace)
+	pc := d.Pods(namespace)
+
+	daemon, err := dc.Get(name)
+	if err != nil {
+		return "", err
+	}
+
+	running, waiting, succeeded, failed, err := getPodStatusForController(pc, daemon.Spec.Selector)
+	if err != nil {
+		return "", err
+	}
+
+	events, _ := d.Events(namespace).Search(daemon)
+
+	return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
+}
+
+func describeDaemonSet(daemon *experimental.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
+	return tabbedString(func(out io.Writer) error {
+		fmt.Fprintf(out, "Name:\t%s\n", daemon.Name)
+		if daemon.Spec.Template != nil {
+			fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec))
+		} else {
+			fmt.Fprintf(out, "Image(s):\t%s\n", "<no template>")
+		}
+		fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Selector))
+		if daemon.Spec.Template != nil {
+			fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector))
+		}
+		fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(daemon.Labels))
+		fmt.Fprintf(out, "Desired Number of Nodes Scheduled:\t%d\n", daemon.Status.DesiredNumberScheduled)
+		fmt.Fprintf(out, "Current Number of Nodes Scheduled:\t%d\n", daemon.Status.CurrentNumberScheduled)
+		fmt.Fprintf(out, "Number of Nodes Misscheduled:\t%d\n", daemon.Status.NumberMisscheduled)
+		fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
+		if events != nil {
+			DescribeEvents(events, out)
+		}
+		return nil
+	})
+}
+
// SecretDescriber generates information about a secret
type SecretDescriber struct {
	client.Interface
@@ -1347,6 +1395,30 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
	}
}

+// Get all daemon sets whose selectors would match a given set of labels.
+// TODO: Move this to pkg/client and ideally implement it server-side (instead
+// of getting all DS's and searching through them manually).
+// TODO: write an interface for controllers and fuse getReplicationControllersForLabels
+// and getDaemonSetsForLabels.
+func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.Labels) ([]experimental.DaemonSet, error) {
+	// Get all daemon sets
+	// TODO: this needs a namespace scope as argument
+	dss, err := c.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("error getting daemon sets: %v", err)
+	}
+
+	// Find the ones that match labelsToMatch.
+	var matchingDaemonSets []experimental.DaemonSet
+	for _, ds := range dss.Items {
+		selector := labels.SelectorFromSet(ds.Spec.Selector)
+		if selector.Matches(labelsToMatch) {
+			matchingDaemonSets = append(matchingDaemonSets, ds)
+		}
+	}
+	return matchingDaemonSets, nil
+}
+
// Get all replication controllers whose selectors would match a given set of
// labels.
// TODO Move this to pkg/client and ideally implement it server-side (instead
@@ -1384,8 +1456,8 @@ func printReplicationControllersByLabels(matchingRCs []api.ReplicationController
	return list
}

-func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) {
-	rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything())
+func getPodStatusForController(c client.PodInterface, selector map[string]string) (running, waiting, succeeded, failed int, err error) {
+	rcPods, err := c.List(labels.SelectorFromSet(selector), fields.Everything())
	if err != nil {
		return
	}
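For orientation, the format strings in describeDaemonSet above, once run through tabbedString, produce a report shaped roughly like the following. Every value here is invented purely for illustration (events omitted):

Name:           fluentd-logging
Image(s):       gcr.io/example/fluentd:1.0
Selector:       app=fluentd
Node-Selector:  role=logging
Labels:         app=fluentd
Desired Number of Nodes Scheduled:  3
Current Number of Nodes Scheduled:  3
Number of Nodes Misscheduled:       0
Pods Status:    3 Running / 0 Waiting / 0 Succeeded / 0 Failed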


@@ -100,6 +100,7 @@ func expandResourceShortcut(resource string) string {
		"pvc": "persistentvolumeclaims",
		"quota": "resourcequotas",
		"rc": "replicationcontrollers",
+		"ds": "daemonsets",
		"svc": "services",
	}
	if expanded, ok := shortForms[resource]; ok {
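For readers unfamiliar with the shortcut mechanism, the surrounding function behaves like this self-contained sketch (trimmed map, standalone package, not the actual kubectl source): known aliases expand to full resource names and anything else passes through unchanged.

package main

import "fmt"

// expandResourceShortcut maps a short alias to its full resource name;
// anything not in the map is returned as-is.
func expandResourceShortcut(resource string) string {
	shortForms := map[string]string{
		"ds":  "daemonsets",
		"rc":  "replicationcontrollers",
		"svc": "services",
	}
	if expanded, ok := shortForms[resource]; ok {
		return expanded
	}
	return resource
}

func main() {
	fmt.Println(expandResourceShortcut("ds"))   // daemonsets
	fmt.Println(expandResourceShortcut("pods")) // pods (unchanged)
}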


@@ -384,6 +384,7 @@ var jobColumns = []string{"JOB", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "SUCCES
var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"}
var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
+var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
var limitRangeColumns = []string{"NAME", "AGE"}
var resourceQuotaColumns = []string{"NAME", "AGE"}
@@ -406,6 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
	h.Handler(podTemplateColumns, printPodTemplateList)
	h.Handler(replicationControllerColumns, printReplicationController)
	h.Handler(replicationControllerColumns, printReplicationControllerList)
+	h.Handler(daemonSetColumns, printDaemonSet)
+	h.Handler(daemonSetColumns, printDaemonSetList)
	h.Handler(jobColumns, printJob)
	h.Handler(jobColumns, printJobList)
	h.Handler(serviceColumns, printService)
@@ -810,6 +813,60 @@ func printServiceList(list *api.ServiceList, w io.Writer, withNamespace bool, wi
	return nil
}

+func printDaemonSet(ds *experimental.DaemonSet, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
+	name := ds.Name
+	namespace := ds.Namespace
+
+	containers := ds.Spec.Template.Spec.Containers
+	var firstContainer api.Container
+	if len(containers) > 0 {
+		firstContainer, containers = containers[0], containers[1:]
+	}
+
+	if withNamespace {
+		if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
+			return err
+		}
+	}
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s",
+		name,
+		firstContainer.Name,
+		firstContainer.Image,
+		labels.FormatLabels(ds.Spec.Selector),
+		labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector),
+	); err != nil {
+		return err
+	}
+	if _, err := fmt.Fprint(w, appendLabels(ds.Labels, columnLabels)); err != nil {
+		return err
+	}
+
+	// Lay out all the other containers on separate lines.
+	extraLinePrefix := "\t"
+	if withNamespace {
+		extraLinePrefix = "\t\t"
+	}
+	for _, container := range containers {
+		_, err := fmt.Fprintf(w, "%s%s\t%s\t%s\t%s", extraLinePrefix, container.Name, container.Image, "", "")
+		if err != nil {
+			return err
+		}
+		if _, err := fmt.Fprint(w, appendLabelTabs(columnLabels)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func printDaemonSetList(list *experimental.DaemonSetList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
+	for _, ds := range list.Items {
+		if err := printDaemonSet(&ds, w, withNamespace, wide, showAll, columnLabels); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
func printEndpoints(endpoints *api.Endpoints, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
	name := endpoints.Name
	namespace := endpoints.Namespace
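printDaemonSet only emits tab-separated cells; the surrounding printer aligns them into columns with a tabwriter further downstream. A minimal, self-contained sketch of that effect (the column set mirrors daemonSetColumns above, the data row is invented, and this is not kubectl code):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Write tab-separated cells, then flush so the tabwriter pads each column.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tCONTAINER(S)\tIMAGE(S)\tSELECTOR\tNODE-SELECTOR")
	fmt.Fprintln(w, "fluentd\tfluentd\tgcr.io/example/fluentd:1.0\tapp=fluentd\trole=logging")
	w.Flush()
}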


@@ -21,10 +21,13 @@ import (
	"strings"
	"time"

+	fuzz "github.com/google/gofuzz"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util/wait"
)

const (
@@ -56,6 +59,8 @@ func ReaperFor(kind string, c client.Interface) (Reaper, error) {
	switch kind {
	case "ReplicationController":
		return &ReplicationControllerReaper{c, Interval, Timeout}, nil
+	case "DaemonSet":
+		return &DaemonSetReaper{c, Interval, Timeout}, nil
	case "Pod":
		return &PodReaper{c}, nil
	case "Service":
@@ -72,6 +77,10 @@ type ReplicationControllerReaper struct {
	client.Interface
	pollInterval, timeout time.Duration
}
+type DaemonSetReaper struct {
+	client.Interface
+	pollInterval, timeout time.Duration
+}
type PodReaper struct {
	client.Interface
}
@@ -167,6 +176,53 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
	return fmt.Sprintf("%s stopped", name), nil
}

+func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
+	daemon, err := reaper.Experimental().DaemonSets(namespace).Get(name)
+	if err != nil {
+		return "", err
+	}
+
+	// Update the daemon set to select for a non-existent NodeName.
+	// The daemon set controller will then kill all the daemon pods corresponding to the daemon set.
+	nodes, err := reaper.Nodes().List(labels.Everything(), fields.Everything())
+	if err != nil {
+		return "", err
+	}
+	var fuzzer = fuzz.New()
+	var nodeName string
+	fuzzer.Fuzz(&nodeName)
+	nameExists := false
+	for _, node := range nodes.Items {
+		nameExists = nameExists || node.Name == nodeName
+	}
+	if nameExists {
+		// Probability of reaching here is extremely low, most likely indicates a programming bug/library error.
+		return "", fmt.Errorf("name collision generating an unused node name, please retry this operation")
+	}
+
+	daemon.Spec.Template.Spec.NodeName = nodeName
+	if _, err := reaper.Experimental().DaemonSets(namespace).Update(daemon); err != nil {
+		return "", err
+	}
+
+	// Wait for the daemon set controller to kill all the daemon pods.
+	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
+		updatedDS, err := reaper.Experimental().DaemonSets(namespace).Get(name)
+		if err != nil {
+			return false, nil
+		}
+		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
+	}); err != nil {
+		return "", err
+	}
+
+	if err := reaper.Experimental().DaemonSets(namespace).Delete(name); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s stopped", name), nil
+}
+
func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
	pods := reaper.Pods(namespace)
	_, err := pods.Get(name)
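The reaper's wait.Poll call above is the usual poll-until-condition-or-timeout idiom. For readers who have not seen it, here is a self-contained sketch of that pattern built on a plain sleep loop (this is not the k8s.io wait package, and the "daemon pods remaining" counter is a stand-in):

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll re-evaluates cond every interval until it reports done, returns an
// error, or the timeout elapses.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	remaining := 3 // stand-in for "daemon pods still scheduled"
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		remaining--
		return remaining == 0, nil
	})
	fmt.Println(err) // <nil>
}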


@@ -24,6 +24,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/experimental"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
+	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
@@ -148,6 +149,15 @@ func testDaemonSets(f *Framework) {
		},
	})
	Expect(err).NotTo(HaveOccurred())
+	defer func() {
+		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
+		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
+		Expect(err).NotTo(HaveOccurred())
+		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
+		Expect(err).NotTo(HaveOccurred())
+		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
+	}()

	By("Check that daemon pods launch on every node of the cluster.")
	Expect(err).NotTo(HaveOccurred())
@@ -210,7 +220,7 @@ func testDaemonSets(f *Framework) {
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

-	By("remove the node selector and wait for")
+	By("remove the node selector and wait for daemons to be unscheduled")
	newNode, err = nodeClient.Get(newNode.Name)
	Expect(err).NotTo(HaveOccurred(), "error getting node")
	newNode.Labels = map[string]string{}
@@ -218,4 +228,7 @@ func testDaemonSets(f *Framework) {
	Expect(err).NotTo(HaveOccurred())
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+
+	By("We should now be able to delete the daemon set.")
+	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}