rebase onto head

Mike Danese 2015-09-12 09:46:10 -07:00
parent 30f5b93d2f
commit 54b0faf39a
7 changed files with 68 additions and 93 deletions

View File

@@ -287,6 +287,7 @@ _kubectl_get()
must_have_one_flag=()
must_have_one_noun=()
must_have_one_noun+=("componentstatus")
must_have_one_noun+=("daemonset")
must_have_one_noun+=("deployment")
must_have_one_noun+=("endpoints")
must_have_one_noun+=("event")
@@ -328,6 +329,7 @@ _kubectl_describe()
must_have_one_flag=()
must_have_one_noun=()
must_have_one_noun+=("daemonset")
must_have_one_noun+=("limitrange")
must_have_one_noun+=("minion")
must_have_one_noun+=("namespace")
@@ -456,6 +458,7 @@ _kubectl_delete()
must_have_one_flag=()
must_have_one_noun=()
must_have_one_noun+=("componentstatus")
must_have_one_noun+=("daemonset")
must_have_one_noun+=("deployment")
must_have_one_noun+=("endpoints")
must_have_one_noun+=("event")
@@ -827,6 +830,7 @@ _kubectl_label()
must_have_one_flag=()
must_have_one_noun=()
must_have_one_noun+=("componentstatus")
must_have_one_noun+=("daemonset")
must_have_one_noun+=("deployment")
must_have_one_noun+=("endpoints")
must_have_one_noun+=("event")

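The four completion hunks above add "daemonset" to each command's generated noun list. These scripts are not edited by hand: kubectl's bash completion comes out of cobra, which emits one must_have_one_noun+=(...) line per entry in a command's ValidArgs. A minimal sketch of that generation path, assuming the stock github.com/spf13/cobra API; the command wiring below is illustrative, not kubectl's actual setup code.

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	get := &cobra.Command{
		Use: "get",
		// Adding "daemonset" here is what produces the new
		// must_have_one_noun+=("daemonset") line in the diff above.
		ValidArgs: []string{"componentstatus", "daemonset", "deployment", "endpoints", "event"},
		Run:       func(cmd *cobra.Command, args []string) {},
	}
	root := &cobra.Command{Use: "kubectl"}
	root.AddCommand(get)
	// Regenerating the completion script picks up the updated noun list.
	root.GenBashCompletion(os.Stdout)
}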
View File

@@ -112,6 +112,7 @@ __custom_func() {
valid_resources = `Valid resource types include:
* pods (aka 'po')
* replicationcontrollers (aka 'rc')
* daemonsets (aka 'ds')
* services (aka 'svc')
* events (aka 'ev')
* nodes (aka 'no')

View File

@@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/apis/experimental"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/fields"
qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
@@ -70,7 +69,7 @@ func describerMap(c *client.Client) map[string]Describer {
m := map[string]Describer{
"Pod": &PodDescriber{c},
"ReplicationController": &ReplicationControllerDescriber{c},
"Daemon": &DaemonDescriber{c},
"DaemonSet": &DaemonSetDescriber{c},
"Secret": &SecretDescriber{c},
"Service": &ServiceDescriber{c},
"ServiceAccount": &ServiceAccountDescriber{c},
@@ -130,7 +129,7 @@ func init() {
describePod,
describeService,
describeReplicationController,
describeDaemon,
describeDaemonSet,
describeNode,
describeNamespace,
)
@@ -426,7 +425,6 @@ type PodDescriber struct {
func (d *PodDescriber) Describe(namespace, name string) (string, error) {
rc := d.ReplicationControllers(namespace)
dc := d.Daemons(namespace)
pc := d.Pods(namespace)
pod, err := pc.Get(name)
@@ -457,15 +455,11 @@ func (d *PodDescriber) Describe(namespace, name string) (string, error) {
if err != nil {
return "", err
}
daemons, err := getDaemonsForLabels(dc, labels.Set(pod.Labels))
if err != nil {
return "", err
}
return describePod(pod, rcs, daemons, events)
return describePod(pod, rcs, events)
}
func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi.Daemon, events *api.EventList) (string, error) {
func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.EventList) (string, error) {
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "Name:\t%s\n", pod.Name)
fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace)
@@ -485,7 +479,6 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi
fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message)
fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs))
fmt.Fprintf(out, "Daemons:\t%s\n", printDaemonsByLabels(daemons))
fmt.Fprintf(out, "Containers:\n")
describeContainers(pod, out)
if len(pod.Status.Conditions) > 0 {
@@ -905,13 +898,13 @@ func describeJob(job *experimental.Job, events *api.EventList) (string, error) {
})
}
// DaemonDescriber generates information about a daemon and the pods it has created.
type DaemonDescriber struct {
// DaemonSetDescriber generates information about a daemon set and the pods it has created.
type DaemonSetDescriber struct {
client.Interface
}
func (d *DaemonDescriber) Describe(namespace, name string) (string, error) {
dc := d.Daemons(namespace)
func (d *DaemonSetDescriber) Describe(namespace, name string) (string, error) {
dc := d.Experimental().DaemonSets(namespace)
pc := d.Pods(namespace)
daemon, err := dc.Get(name)
@@ -926,10 +919,10 @@ func (d *DaemonDescriber) Describe(namespace, name string) (string, error) {
events, _ := d.Events(namespace).Search(daemon)
return describeDaemon(daemon, events, running, waiting, succeeded, failed)
return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
}
func describeDaemon(daemon *expapi.Daemon, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
func describeDaemonSet(daemon *experimental.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "Name:\t%s\n", daemon.Name)
if daemon.Spec.Template != nil {
@@ -1402,28 +1395,28 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
}
}
// Get all daemons whose selectors would match a given set of labels.
// Get all daemon sets whose selectors would match a given set of labels.
// TODO: Move this to pkg/client and ideally implement it server-side (instead
// of getting all RC's and searching through them manually).
// of getting all DS's and searching through them manually).
// TODO: write an interface for controllers and fuse getReplicationControllersForLabels
// and getDaemonsForLabels.
func getDaemonsForLabels(c client.DaemonInterface, labelsToMatch labels.Labels) ([]expapi.Daemon, error) {
// Get all daemon controllers.
// TODO this needs a namespace scope as argument
daemons, err := c.List(labels.Everything())
// and getDaemonSetsForLabels.
func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.Labels) ([]experimental.DaemonSet, error) {
// Get all daemon sets
// TODO: this needs a namespace scope as argument
dss, err := c.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("error getting daemons: %v", err)
return nil, fmt.Errorf("error getting daemon sets: %v", err)
}
// Find the ones that match labelsToMatch.
var matchingDaemons []expapi.Daemon
for _, daemon := range daemons.Items {
selector := labels.SelectorFromSet(daemon.Spec.Selector)
var matchingDaemonSets []experimental.DaemonSet
for _, ds := range dss.Items {
selector := labels.SelectorFromSet(ds.Spec.Selector)
if selector.Matches(labelsToMatch) {
matchingDaemons = append(matchingDaemons, daemon)
matchingDaemonSets = append(matchingDaemonSets, ds)
}
}
return matchingDaemons, nil
return matchingDaemonSets, nil
}
// Get all replication controllers whose selectors would match a given set of
@@ -1449,20 +1442,6 @@ func getReplicationControllersForLabels(c client.ReplicationControllerInterface,
return matchingRCs, nil
}
func printDaemonsByLabels(matchingDaemons []expapi.Daemon) string {
// Format the matching RC's into strings.
var daemonStrings []string
for _, daemon := range matchingDaemons {
daemonStrings = append(daemonStrings, fmt.Sprintf("%s (%d desired, %d nodes scheduled, %d nodes misscheduled)", daemon.Name, daemon.Status.DesiredNumberScheduled, daemon.Status.CurrentNumberScheduled, daemon.Status.NumberMisscheduled))
}
list := strings.Join(daemonStrings, ", ")
if list == "" {
return "<none>"
}
return list
}
func printReplicationControllersByLabels(matchingRCs []api.ReplicationController) string {
// Format the matching RC's into strings.
var rcStrings []string

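getDaemonSetsForLabels filters the full daemon set list client-side: labels.SelectorFromSet(ds.Spec.Selector) builds a selector that matches a pod's labels exactly when every key/value pair of the selector appears in them. A self-contained sketch of that subset-matching rule, as a stand-in for the real labels package:

package main

import "fmt"

// matches reports whether every selector pair is present in podLabels,
// the same semantics SelectorFromSet(...).Matches(...) provides.
func matches(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	dsSelector := map[string]string{"app": "logging"}
	podLabels := map[string]string{"app": "logging", "pod-template-hash": "abc"}
	fmt.Println(matches(dsSelector, podLabels)) // true: this pod's daemon set would be listed
}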
View File

@@ -105,7 +105,7 @@ func expandResourceShortcut(resource string) string {
"pvc": "persistentvolumeclaims",
"quota": "resourcequotas",
"rc": "replicationcontrollers",
"dm": "daemons",
"ds": "daemonsets",
"svc": "services",
}
if expanded, ok := shortForms[resource]; ok {

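The shortForms table above retires the old "dm" alias in favor of "ds". Lookups that miss the table fall through unchanged, so full resource names keep working. A runnable sketch mirroring the hunk above, with the table truncated to the entries shown:

package main

import "fmt"

// expandResourceShortcut mirrors the lookup in the diff: known short forms
// expand, anything else is returned as-is.
func expandResourceShortcut(resource string) string {
	shortForms := map[string]string{
		"rc":  "replicationcontrollers",
		"ds":  "daemonsets", // replaces the old "dm" -> "daemons" entry
		"svc": "services",
	}
	if expanded, ok := shortForms[resource]; ok {
		return expanded
	}
	return resource
}

func main() {
	fmt.Println(expandResourceShortcut("ds"))   // daemonsets
	fmt.Println(expandResourceShortcut("pods")) // pods (no alias, passed through)
}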
View File

@@ -384,7 +384,7 @@ var jobColumns = []string{"JOB", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "SUCCES
var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"}
var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
var daemonColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
var limitRangeColumns = []string{"NAME", "AGE"}
var resourceQuotaColumns = []string{"NAME", "AGE"}
@@ -407,8 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
h.Handler(podTemplateColumns, printPodTemplateList)
h.Handler(replicationControllerColumns, printReplicationController)
h.Handler(replicationControllerColumns, printReplicationControllerList)
h.Handler(daemonColumns, printDaemon)
h.Handler(daemonColumns, printDaemonList)
h.Handler(daemonSetColumns, printDaemonSet)
h.Handler(daemonSetColumns, printDaemonSetList)
h.Handler(jobColumns, printJob)
h.Handler(jobColumns, printJobList)
h.Handler(serviceColumns, printService)
@@ -813,11 +813,11 @@ func printServiceList(list *api.ServiceList, w io.Writer, withNamespace bool, wi
return nil
}
func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
name := daemon.Name
namespace := daemon.Namespace
func printDaemonSet(ds *experimental.DaemonSet, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
name := ds.Name
namespace := ds.Namespace
containers := daemon.Spec.Template.Spec.Containers
containers := ds.Spec.Template.Spec.Containers
var firstContainer api.Container
if len(containers) > 0 {
firstContainer, containers = containers[0], containers[1:]
@@ -828,16 +828,16 @@ func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bo
return err
}
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s",
name,
firstContainer.Name,
firstContainer.Image,
labels.FormatLabels(daemon.Spec.Selector),
labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector),
labels.FormatLabels(ds.Spec.Selector),
labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector),
); err != nil {
return err
}
if _, err := fmt.Fprint(w, appendLabels(daemon.Labels, columnLabels)); err != nil {
if _, err := fmt.Fprint(w, appendLabels(ds.Labels, columnLabels)); err != nil {
return err
}
@@ -858,9 +858,9 @@ func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bo
return nil
}
func printDaemonList(list *expapi.DaemonList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
for _, daemon := range list.Items {
if err := printDaemon(&daemon, w, withNamespace, wide, showAll, columnLabels); err != nil {
func printDaemonSetList(list *experimental.DaemonSetList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
for _, ds := range list.Items {
if err := printDaemonSet(&ds, w, withNamespace, wide, showAll, columnLabels); err != nil {
return err
}
}

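h.Handler registers a column list plus a per-type print function; the real HumanReadablePrinter validates the function's signature and dispatches on its argument type via reflection. A simplified sketch of that registration-and-dispatch pattern, keyed by reflect.Type rather than kubectl's actual machinery:

package main

import (
	"fmt"
	"os"
	"reflect"
	"text/tabwriter"
)

// DaemonSet is a stand-in for experimental.DaemonSet.
type DaemonSet struct{ Name, Selector string }

type handler struct {
	columns []string
	printFn func(obj interface{}, w *tabwriter.Writer)
}

var handlers = map[reflect.Type]handler{}

func register(example interface{}, columns []string, fn func(interface{}, *tabwriter.Writer)) {
	handlers[reflect.TypeOf(example)] = handler{columns, fn}
}

func printObj(obj interface{}, w *tabwriter.Writer) {
	h, ok := handlers[reflect.TypeOf(obj)]
	if !ok {
		fmt.Fprintf(w, "no handler for %T\n", obj)
		return
	}
	for _, c := range h.columns {
		fmt.Fprintf(w, "%s\t", c)
	}
	fmt.Fprintln(w)
	h.printFn(obj, w)
}

func main() {
	register(DaemonSet{}, []string{"NAME", "SELECTOR"}, func(obj interface{}, w *tabwriter.Writer) {
		ds := obj.(DaemonSet)
		fmt.Fprintf(w, "%s\t%s\n", ds.Name, ds.Selector)
	})
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	printObj(DaemonSet{Name: "fluentd", Selector: "app=fluentd"}, w)
	w.Flush()
}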
View File

@@ -59,8 +59,8 @@ func ReaperFor(kind string, c client.Interface) (Reaper, error) {
switch kind {
case "ReplicationController":
return &ReplicationControllerReaper{c, Interval, Timeout}, nil
case "Daemon":
return &DaemonReaper{c, Interval, Timeout}, nil
case "DaemonSet":
return &DaemonSetReaper{c, Interval, Timeout}, nil
case "Pod":
return &PodReaper{c}, nil
case "Service":
@@ -77,7 +77,7 @@ type ReplicationControllerReaper struct {
client.Interface
pollInterval, timeout time.Duration
}
type DaemonReaper struct {
type DaemonSetReaper struct {
client.Interface
pollInterval, timeout time.Duration
}
@@ -176,57 +176,48 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
return fmt.Sprintf("%s stopped", name), nil
}
func (reaper *DaemonReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
// Retrieve the daemon we want to stop.
daemonClient := reaper.Daemons(namespace)
daemon, err := daemonClient.Get(name)
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
daemon, err := reaper.Experimental().DaemonSets(namespace).Get(name)
if err != nil {
return "", err
}
// Update the daemon to select for a non-existent NodeName.
// The daemon manager will then kill all the daemon pods corresponding to daemon daemon.
// Update the daemon set to select for a non-existent NodeName.
// The daemon set controller will then kill all the daemon pods corresponding to the daemon set.
nodes, err := reaper.Nodes().List(labels.Everything(), fields.Everything())
if err != nil {
return "", err
}
var fuzzer = fuzz.New()
var nameExists bool
numRetries := 1
for try := 0; try <= numRetries; try++ {
var nodeName string
fuzzer.Fuzz(&nodeName)
nameExists = false
for _, node := range nodes.Items {
nameExists = nameExists || node.Name == nodeName
}
if !nameExists {
daemon.Spec.Template.Spec.NodeName = nodeName
break
}
var nodeName string
fuzzer.Fuzz(&nodeName)
nameExists = false
for _, node := range nodes.Items {
nameExists = nameExists || node.Name == nodeName
}
if nameExists {
// Probability of reaching here is extremely low, most likely indicates a programming bug/library error.
return "", fmt.Errorf("Failed to stop node.")
return "", fmt.Errorf("Name collision generating an unused node name. Please retry this operation.")
}
daemonClient.Update(daemon)
// Wait for the daemon manager to kill all the daemon's daemon pods.
daemonPodsKilled := func() (bool, error) {
updatedDc, err := daemonClient.Get(name)
daemon.Spec.Template.Spec.NodeName = nodeName
reaper.Experimental().DaemonSets(namespace).Update(daemon)
// Wait for the daemon set controller to kill all the daemon pods.
if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
updatedDS, err := reaper.Experimental().DaemonSets(namespace).Get(name)
if err != nil {
// We don't return an error, because returning an error will abort wait.Poll, but
// if there's an error, we want to try getting the daemon again.
return false, nil
}
return updatedDc.Status.CurrentNumberScheduled+updatedDc.Status.NumberMisscheduled == 0, nil
}
if err := wait.Poll(reaper.pollInterval, reaper.timeout, daemonPodsKilled); err != nil {
return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
}); err != nil {
return "", err
}
// Finally, kill the daemon.
if err := daemonClient.Delete(name); err != nil {
if err := reaper.Experimental().DaemonSets(namespace).Delete(name); err != nil {
return "", err
}
return fmt.Sprintf("%s stopped", name), nil

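The rewritten Stop leans on wait.Poll, whose condition returns (done, err): a non-nil error aborts the poll immediately, which is why the Get failure above deliberately returns (false, nil) so the poll retries. A self-contained sketch of that contract, standing in for k8s.io/kubernetes/pkg/util/wait rather than reproducing it:

package main

import (
	"errors"
	"fmt"
	"time"
)

type ConditionFunc func() (done bool, err error)

// poll retries condition every interval until it reports done, returns an
// error (which aborts immediately), or the timeout elapses.
func poll(interval, timeout time.Duration, condition ConditionFunc) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	remaining := 3
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		remaining-- // stand-in for the "scheduled + misscheduled == 0" check
		return remaining == 0, nil
	})
	fmt.Println("poll finished:", err) // <nil> once the count drains to zero
}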
View File

@@ -210,7 +210,7 @@ func testDaemonSets(f *Framework) {
err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
By("remove the node selector and wait for")
By("remove the node selector and wait for daemons to be unscheduled")
newNode, err = nodeClient.Get(newNode.Name)
Expect(err).NotTo(HaveOccurred(), "error getting node")
newNode.Labels = map[string]string{}
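checkDaemonPodOnNodes is the condition polled above; plausibly it lists the daemon pods by label and confirms they run on exactly the expected nodes. A hedged, self-contained sketch of such a check, with simplified types standing in for the e2e framework's client:

package main

import "fmt"

type pod struct {
	Labels   map[string]string
	NodeName string
}

// checkDaemonPodOnNodes returns a poll condition: true once pods matching
// selector sit on exactly the nodes named in nodeNames.
func checkDaemonPodOnNodes(pods []pod, selector map[string]string, nodeNames []string) func() (bool, error) {
	return func() (bool, error) {
		want := map[string]bool{}
		for _, n := range nodeNames {
			want[n] = true
		}
		got := map[string]bool{}
		for _, p := range pods {
			match := true
			for k, v := range selector {
				if p.Labels[k] != v {
					match = false
				}
			}
			if match {
				got[p.NodeName] = true
			}
		}
		if len(got) != len(want) {
			return false, nil // not settled yet; let the poll retry
		}
		for n := range want {
			if !got[n] {
				return false, nil
			}
		}
		return true, nil
	}
}

func main() {
	pods := []pod{{Labels: map[string]string{"daemon": "complex"}, NodeName: "node-1"}}
	ok, _ := checkDaemonPodOnNodes(pods, map[string]string{"daemon": "complex"}, []string{"node-1"})()
	fmt.Println(ok) // true: the daemon pod landed only on the labeled node
}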