Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #10008 from hurf/age_all
Add age column for all resources when using 'kubectl get'
Commit: 18dc230418
@@ -24,9 +24,6 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
 source "${KUBE_ROOT}/cluster/kube-env.sh"
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-MINIONS_FILE=/tmp/minions-$$
-trap 'rm -rf "${MINIONS_FILE}"' EXIT
-
 EXPECTED_NUM_NODES="${NUM_MINIONS}"
 if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
   EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
@@ -34,21 +31,18 @@ fi
 # Make several attempts to deal with slow cluster birth.
 attempt=0
 while true; do
-  # The "kubectl get nodes" output is three columns like this:
+  # The "kubectl get nodes -o template" exports node information.
   #
-  #  NAME                     LABELS    STATUS
-  #  kubernetes-minion-03nb   <none>    Ready
-  #
-  # Echo the output, strip the first line, then gather 2 counts:
+  # Echo the output and gather 2 counts:
   #  - Total number of nodes.
   #  - Number of "ready" nodes.
   #
   # Suppress errors from kubectl output because during cluster bootstrapping
   # for clusters where the master node is registered, the apiserver will become
   # available and then get restarted as the kubelet configures the docker bridge.
-  "${KUBE_ROOT}/cluster/kubectl.sh" get nodes > "${MINIONS_FILE}" 2> /dev/null || true
-  found=$(cat "${MINIONS_FILE}" | sed '1d' | grep -c .) || true
-  ready=$(cat "${MINIONS_FILE}" | sed '1d' | awk '{print $NF}' | grep -c '^Ready') || true
+  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{with index .status.conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1)
+  found=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:') || true
+  ready=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:True') || true
 
   if (( "${found}" == "${EXPECTED_NUM_NODES}" )) && (( "${ready}" == "${EXPECTED_NUM_NODES}")); then
     break
@@ -56,7 +50,7 @@ while true; do
   # Set the timeout to ~10minutes (40 x 15 second) to avoid timeouts for 100-node clusters.
   if (( attempt > 40 )); then
     echo -e "${color_red}Detected ${ready} ready nodes, found ${found} nodes out of expected ${EXPECTED_NUM_NODES}. Your cluster may not be working.${color_norm}"
-    cat -n "${MINIONS_FILE}"
+    "${KUBE_ROOT}/cluster/kubectl.sh" get nodes
     exit 2
   else
     echo -e "${color_yellow}Waiting for ${EXPECTED_NUM_NODES} ready nodes. ${ready} ready nodes, ${found} registered. Retrying.${color_norm}"
@@ -65,35 +59,28 @@ while true; do
     sleep 15
   fi
 done
-echo "Found ${found} nodes."
-echo -n " "
-head -n 1 "${MINIONS_FILE}"
-tail -n +2 "${MINIONS_FILE}" | cat -n
+echo "Found ${found} node(s)."
+"${KUBE_ROOT}/cluster/kubectl.sh" get nodes
 
 attempt=0
 while true; do
-  kubectl_output=$("${KUBE_ROOT}/cluster/kubectl.sh" get cs) || true
-
-  # The "kubectl componentstatuses" output is four columns like this:
+  # The "kubectl componentstatuses -o template" exports components health information.
   #
-  #     COMPONENT            HEALTH    MSG   ERR
-  #     controller-manager   Healthy   ok    nil
-  #
-  # Parse the output to capture the value of the second column("HEALTH"), then use grep to
-  # count the number of times it doesn't match "Healthy".
-  non_success_count=$(echo "${kubectl_output}" | \
-    sed '1d' |
-    sed -n 's/^[[:alnum:][:punct:]]/&/p' | \
-    grep --invert-match -c '^[[:alnum:][:punct:]]\{1,\}[[:space:]]\{1,\}Healthy') || true
+  # Echo the output and gather 2 counts:
+  #  - Total number of componentstatuses.
+  #  - Number of "healthy" components.
+  cs_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1) || true
+  componentstatuses=$(echo "${cs_status}" | tr "," "\n" | grep -c 'Healthy:') || true
+  healthy=$(echo "${cs_status}" | tr "," "\n" | grep -c 'Healthy:True') || true
 
-  if ((non_success_count > 0)); then
+  if ((componentstatuses > healthy)); then
     if ((attempt < 5)); then
       echo -e "${color_yellow}Cluster not working yet.${color_norm}"
       attempt=$((attempt+1))
       sleep 30
     else
       echo -e " ${color_yellow}Validate output:${color_norm}"
-      echo "${kubectl_output}"
+      "${KUBE_ROOT}/cluster/kubectl.sh" get cs
      echo -e "${color_red}Validation returned one or more failed components. Cluster is probably broken.${color_norm}"
      exit 1
    fi
@@ -103,5 +90,5 @@ while true; do
 done
 
 echo "Validate output:"
-echo "${kubectl_output}"
+"${KUBE_ROOT}/cluster/kubectl.sh" get cs
 echo -e "${color_green}Cluster validation succeeded${color_norm}"
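Note on the counting logic above: the go-template prints one "type:status" pair per node, comma-separated, and the tr/grep pipeline then counts registered versus ready nodes (the componentstatuses loop uses the same pattern). A minimal Go sketch of the same counting, assuming input shaped like that template output:

package main

import (
	"fmt"
	"strings"
)

// countNodes mirrors the script's tr/grep pipeline: split the template
// output on commas, count entries that mention the Ready condition at
// all (registered nodes) and those that are Ready:True (ready nodes).
func countNodes(nodesStatus string) (found, ready int) {
	for _, cond := range strings.Split(nodesStatus, ",") {
		if strings.Contains(cond, "Ready:") {
			found++
		}
		if strings.Contains(cond, "Ready:True") {
			ready++
		}
	}
	return
}

func main() {
	// Hypothetical three-node cluster with one node not yet ready.
	found, ready := countNodes("Ready:True,Ready:False,Ready:True,")
	fmt.Printf("found=%d ready=%d\n", found, ready) // found=3 ready=2
}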
@@ -264,9 +264,10 @@ func ExamplePrintReplicationControllerWithNamespace() {
 	cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr)
 	ctrl := &api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
-			Name:      "foo",
-			Namespace: "beep",
-			Labels:    map[string]string{"foo": "bar"},
+			Name:              "foo",
+			Namespace:         "beep",
+			Labels:            map[string]string{"foo": "bar"},
+			CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 		},
 		Spec: api.ReplicationControllerSpec{
 			Replicas: 1,
@@ -291,8 +292,8 @@ func ExamplePrintReplicationControllerWithNamespace() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// NAMESPACE   CONTROLLER   CONTAINER(S)   IMAGE(S)    SELECTOR   REPLICAS
-	// beep        foo          foo            someimage   foo=bar    1
+	// NAMESPACE   CONTROLLER   CONTAINER(S)   IMAGE(S)    SELECTOR   REPLICAS   AGE
+	// beep        foo          foo            someimage   foo=bar    1          10y
 }
 
 func ExamplePrintPodWithWideFormat() {
@@ -342,8 +343,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		Items: []api.Service{
 			{
 				ObjectMeta: api.ObjectMeta{
-					Name:      "svc1",
-					Namespace: "ns1",
+					Name:              "svc1",
+					Namespace:         "ns1",
+					CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 					Labels: map[string]string{
 						"l1": "value",
 					},
@@ -362,8 +364,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 			},
 			{
 				ObjectMeta: api.ObjectMeta{
-					Name:      "svc2",
-					Namespace: "ns2",
+					Name:              "svc2",
+					Namespace:         "ns2",
+					CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 					Labels: map[string]string{
 						"l1": "dolla-bill-yall",
 					},
@@ -388,10 +391,10 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// |NAMESPACE   NAME   LABELS               SELECTOR   IP(S)      PORT(S)    L1|
-	// |ns1         svc1   l1=value             s=magic    10.1.1.1   53/UDP     value|
+	// |NAMESPACE   NAME   LABELS               SELECTOR   IP(S)      PORT(S)    AGE   L1|
+	// |ns1         svc1   l1=value             s=magic    10.1.1.1   53/UDP     10y   value|
 	// |                                                              53/TCP     |
-	// |ns2         svc2   l1=dolla-bill-yall   s=kazam    10.1.1.2   80/TCP     dolla-bill-yall|
+	// |ns2         svc2   l1=dolla-bill-yall   s=kazam    10.1.1.2   80/TCP     10y   dolla-bill-yall|
 	// |                                                              8080/TCP   |
 	// ||
 }
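The example tests pin CreationTimestamp exactly ten years in the past so the new AGE cell is deterministic ("10y") no matter when the test runs. A rough sketch of the largest-whole-unit abbreviation that would produce such a cell (a hypothetical helper, not the translateTimestamp from pkg/kubectl):

package main

import (
	"fmt"
	"time"
)

// humanDuration is a hypothetical stand-in for the abbreviation applied
// to CreationTimestamp: report the largest whole unit, e.g. "10y", "3d", "7h".
func humanDuration(d time.Duration) string {
	hours := int(d.Hours())
	switch {
	case hours >= 24*365:
		return fmt.Sprintf("%dy", hours/(24*365))
	case hours >= 24:
		return fmt.Sprintf("%dd", hours/24)
	case hours >= 1:
		return fmt.Sprintf("%dh", hours)
	default:
		return fmt.Sprintf("%dm", int(d.Minutes()))
	}
}

func main() {
	created := time.Now().AddDate(-10, 0, 0) // same trick as the tests above
	fmt.Println(humanDuration(time.Since(created))) // "10y"
}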
@@ -258,18 +258,18 @@ func (h *HumanReadablePrinter) HandledResources() []string {
 // pkg/kubectl/cmd/get.go to reflect the new resource type.
 var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
 var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
-var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS"}
-var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)"}
-var endpointColumns = []string{"NAME", "ENDPOINTS"}
-var nodeColumns = []string{"NAME", "LABELS", "STATUS"}
+var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS", "AGE"}
+var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)", "AGE"}
+var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
+var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
 var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
-var limitRangeColumns = []string{"NAME"}
-var resourceQuotaColumns = []string{"NAME"}
-var namespaceColumns = []string{"NAME", "LABELS", "STATUS"}
-var secretColumns = []string{"NAME", "TYPE", "DATA"}
-var serviceAccountColumns = []string{"NAME", "SECRETS"}
-var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON"}
-var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME"}
+var limitRangeColumns = []string{"NAME", "AGE"}
+var resourceQuotaColumns = []string{"NAME", "AGE"}
+var namespaceColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
+var secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"}
+var serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"}
+var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON", "AGE"}
+var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME", "AGE"}
 var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"}
 var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too.
 
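Each slice above is the header row for one resource type; the print functions below must then emit exactly one tab-separated cell per column, since the human-readable printer aligns rows with a tabwriter. A self-contained sketch of that contract (standard library only; the column names come from this diff, the row values are made up):

package main

import (
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Header and row must carry the same number of tab-separated cells,
	// which is why adding "AGE" to a columns slice requires appending a
	// matching "\t%s" cell in the corresponding print function.
	nodeColumns := []string{"NAME", "LABELS", "STATUS", "AGE"}
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	defer w.Flush()

	w.Write([]byte(strings.Join(nodeColumns, "\t") + "\n"))
	w.Write([]byte("kubernetes-minion-03nb\t<none>\tReady\t10y\n"))
}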
@@ -517,12 +517,13 @@ func printReplicationController(controller *api.ReplicationController, w io.Writ
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d",
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s",
 		name,
 		firstContainer.Name,
 		firstContainer.Image,
 		formatLabels(controller.Spec.Selector),
 		controller.Spec.Replicas,
+		translateTimestamp(controller.CreationTimestamp),
 	); err != nil {
 		return err
 	}
@@ -574,8 +575,13 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool,
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s", name, formatLabels(svc.Labels),
-		formatLabels(svc.Spec.Selector), ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\t%s",
+		name,
+		formatLabels(svc.Labels),
+		formatLabels(svc.Spec.Selector),
+		ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol,
+		translateTimestamp(svc.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	if _, err := fmt.Fprint(w, appendLabels(svc.Labels, columnLabels)); err != nil {
@@ -629,7 +635,7 @@ func printEndpoints(endpoints *api.Endpoints, w io.Writer, withNamespace bool, w
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s", name, formatEndpoints(endpoints, nil)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, formatEndpoints(endpoints, nil), translateTimestamp(endpoints.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(endpoints.Labels, columnLabels))
@@ -649,7 +655,8 @@ func printNamespace(item *api.Namespace, w io.Writer, withNamespace bool, wide b
 	if withNamespace {
 		return fmt.Errorf("namespace is not namespaced")
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", item.Name, formatLabels(item.Labels), item.Status.Phase); err != nil {
+
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", item.Name, formatLabels(item.Labels), item.Status.Phase, translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -674,7 +681,7 @@ func printSecret(item *api.Secret, w io.Writer, withNamespace bool, wide bool, c
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%v", name, item.Type, len(item.Data)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%s", name, item.Type, len(item.Data), translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -700,7 +707,7 @@ func printServiceAccount(item *api.ServiceAccount, w io.Writer, withNamespace bo
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%d", name, len(item.Secrets)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%d\t%s", name, len(item.Secrets), translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -744,7 +751,7 @@ func printNode(node *api.Node, w io.Writer, withNamespace bool, wide bool, colum
 		status = append(status, "SchedulingDisabled")
 	}
 
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", node.Name, formatLabels(node.Labels), strings.Join(status, ",")); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", node.Name, formatLabels(node.Labels), strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(node.Labels, columnLabels))
@@ -778,7 +785,15 @@ func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, withNamespace
 	aQty := pv.Spec.Capacity[api.ResourceStorage]
 	aSize := aQty.Value()
 
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s", name, formatLabels(pv.Labels), aSize, modesStr, pv.Status.Phase, claimRefUID, pv.Status.Reason); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s",
+		name,
+		formatLabels(pv.Labels),
+		aSize, modesStr,
+		pv.Status.Phase,
+		claimRefUID,
+		pv.Status.Reason,
+		translateTimestamp(pv.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(pv.Labels, columnLabels))
@@ -809,7 +824,8 @@ func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, wit
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, formatLabels(pvc.Labels), pvc.Status.Phase, pvc.Spec.VolumeName); err != nil {
+
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, pvc.Labels, pvc.Status.Phase, pvc.Spec.VolumeName, translateTimestamp(pvc.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(pvc.Labels, columnLabels))
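One detail worth flagging in the PVC hunk: the new line passes pvc.Labels directly where the old code used formatLabels(pvc.Labels). Under %s those render differently, as this small comparison shows (formatLabelsSketch is a hypothetical stand-in for the real formatLabels):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatLabelsSketch is a hypothetical stand-in for formatLabels:
// render a label map as sorted "k=v" pairs joined by commas.
func formatLabelsSketch(labels map[string]string) string {
	pairs := make([]string, 0, len(labels))
	for k, v := range labels {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func main() {
	labels := map[string]string{"l1": "value"}
	fmt.Printf("%s\n", labels)                     // map[l1:value] (Go's default map rendering)
	fmt.Printf("%s\n", formatLabelsSketch(labels)) // l1=value
}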
@@ -871,7 +887,11 @@ func printLimitRange(limitRange *api.LimitRange, w io.Writer, withNamespace bool
 		}
 	}
 
-	if _, err := fmt.Fprintf(w, "%s", name); err != nil {
+	if _, err := fmt.Fprintf(
+		w, "%s\t%s",
+		name,
+		translateTimestamp(limitRange.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(limitRange.Labels, columnLabels))
@@ -898,7 +918,11 @@ func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, withNames
 		}
 	}
 
-	if _, err := fmt.Fprintf(w, "%s", name); err != nil {
+	if _, err := fmt.Fprintf(
+		w, "%s\t%s",
+		name,
+		translateTimestamp(resourceQuota.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(resourceQuota.Labels, columnLabels))