Merge pull request #10008 from hurf/age_all
Add age column for all resources when using 'kubectl get'
This commit is contained in commit 18dc230418.
@@ -24,9 +24,6 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
 source "${KUBE_ROOT}/cluster/kube-env.sh"
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-MINIONS_FILE=/tmp/minions-$$
-trap 'rm -rf "${MINIONS_FILE}"' EXIT
-
 EXPECTED_NUM_NODES="${NUM_MINIONS}"
 if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
   EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
@@ -34,21 +31,18 @@ fi
 # Make several attempts to deal with slow cluster birth.
 attempt=0
 while true; do
-  # The "kubectl get nodes" output is three columns like this:
+  # The "kubectl get nodes -o template" exports node information.
   #
-  #   NAME                     LABELS   STATUS
-  #   kubernetes-minion-03nb   <none>   Ready
-  #
-  # Echo the output, strip the first line, then gather 2 counts:
+  # Echo the output and gather 2 counts:
   # - Total number of nodes.
   # - Number of "ready" nodes.
   #
   # Suppress errors from kubectl output because during cluster bootstrapping
   # for clusters where the master node is registered, the apiserver will become
   # available and then get restarted as the kubelet configures the docker bridge.
-  "${KUBE_ROOT}/cluster/kubectl.sh" get nodes > "${MINIONS_FILE}" 2> /dev/null || true
-  found=$(cat "${MINIONS_FILE}" | sed '1d' | grep -c .) || true
-  ready=$(cat "${MINIONS_FILE}" | sed '1d' | awk '{print $NF}' | grep -c '^Ready') || true
+  nodes_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template='{{range .items}}{{with index .status.conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1)
+  found=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:') || true
+  ready=$(echo "${nodes_status}" | tr "," "\n" | grep -c 'Ready:True') || true
 
   if (( "${found}" == "${EXPECTED_NUM_NODES}" )) && (( "${ready}" == "${EXPECTED_NUM_NODES}")); then
     break
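The template passed to `kubectl get nodes -o template` in the hunk above renders each node's first status condition as a `Type:Status,` pair, and the script then counts those pairs. As a rough illustration, the same template string can be exercised with Go's text/template; the node data below is a hand-built stand-in for the apiserver's node list, not real cluster output:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

func main() {
	// The template string the script now passes to `kubectl get nodes -o template`;
	// it prints "Type:Status," for the first condition of every node,
	// e.g. "Ready:True,Ready:False,".
	const tmpl = `{{range .items}}{{with index .status.conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}`

	// Stand-in for the serialized node list (maps are used so the lowercase
	// field names in the template resolve).
	nodes := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"status": map[string]interface{}{
				"conditions": []interface{}{map[string]interface{}{"type": "Ready", "status": "True"}},
			}},
			map[string]interface{}{"status": map[string]interface{}{
				"conditions": []interface{}{map[string]interface{}{"type": "Ready", "status": "False"}},
			}},
		},
	}

	var out strings.Builder
	if err := template.Must(template.New("nodes").Parse(tmpl)).Execute(&out, nodes); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(out.String()) // Ready:True,Ready:False,

	// The script derives its two counts from comma-separated entries of this string:
	//   found: entries containing "Ready:"      -> 2 registered nodes
	//   ready: entries containing "Ready:True"  -> 1 ready node
	found := strings.Count(out.String(), "Ready:")
	ready := strings.Count(out.String(), "Ready:True")
	fmt.Println(found, ready) // 2 1
}

The componentstatuses check a few hunks further down follows the same pattern: export `Healthy:<status>,` pairs with a template, then compare the count of all entries against the count of `Healthy:True` entries.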
@@ -56,7 +50,7 @@ while true; do
   # Set the timeout to ~10minutes (40 x 15 second) to avoid timeouts for 100-node clusters.
   if (( attempt > 40 )); then
     echo -e "${color_red}Detected ${ready} ready nodes, found ${found} nodes out of expected ${EXPECTED_NUM_NODES}. Your cluster may not be working.${color_norm}"
-    cat -n "${MINIONS_FILE}"
+    "${KUBE_ROOT}/cluster/kubectl.sh" get nodes
     exit 2
   else
     echo -e "${color_yellow}Waiting for ${EXPECTED_NUM_NODES} ready nodes. ${ready} ready nodes, ${found} registered. Retrying.${color_norm}"
@@ -65,35 +59,28 @@ while true; do
     sleep 15
   fi
 done
-echo "Found ${found} nodes."
-echo -n " "
-head -n 1 "${MINIONS_FILE}"
-tail -n +2 "${MINIONS_FILE}" | cat -n
+echo "Found ${found} node(s)."
+"${KUBE_ROOT}/cluster/kubectl.sh" get nodes
 
 attempt=0
 while true; do
-  kubectl_output=$("${KUBE_ROOT}/cluster/kubectl.sh" get cs) || true
-
-  # The "kubectl componentstatuses" output is four columns like this:
+  # The "kubectl componentstatuses -o template" exports components health information.
   #
-  #   COMPONENT            HEALTH    MSG   ERR
-  #   controller-manager   Healthy   ok    nil
-  #
-  # Parse the output to capture the value of the second column("HEALTH"), then use grep to
-  # count the number of times it doesn't match "Healthy".
-  non_success_count=$(echo "${kubectl_output}" | \
-    sed '1d' |
-    sed -n 's/^[[:alnum:][:punct:]]/&/p' | \
-    grep --invert-match -c '^[[:alnum:][:punct:]]\{1,\}[[:space:]]\{1,\}Healthy') || true
+  # Echo the output and gather 2 counts:
+  # - Total number of componentstatuses.
+  # - Number of "healthy" components.
+  cs_status=$("${KUBE_ROOT}/cluster/kubectl.sh" get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}},{{end}}{{end}}' --api-version=v1) || true
+  componentstatuses=$(echo "${cs_status}" | tr "," "\n" | grep -c 'Healthy:') || true
+  healthy=$(echo "${cs_status}" | tr "," "\n" | grep -c 'Healthy:True') || true
 
-  if ((non_success_count > 0)); then
+  if ((componentstatuses > healthy)); then
     if ((attempt < 5)); then
       echo -e "${color_yellow}Cluster not working yet.${color_norm}"
       attempt=$((attempt+1))
       sleep 30
     else
       echo -e " ${color_yellow}Validate output:${color_norm}"
-      echo "${kubectl_output}"
+      "${KUBE_ROOT}/cluster/kubectl.sh" get cs
       echo -e "${color_red}Validation returned one or more failed components. Cluster is probably broken.${color_norm}"
       exit 1
     fi
@@ -103,5 +90,5 @@ while true; do
 done
 
 echo "Validate output:"
-echo "${kubectl_output}"
+"${KUBE_ROOT}/cluster/kubectl.sh" get cs
 echo -e "${color_green}Cluster validation succeeded${color_norm}"
@@ -264,9 +264,10 @@ func ExamplePrintReplicationControllerWithNamespace() {
 	cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr)
 	ctrl := &api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
 			Name: "foo",
 			Namespace: "beep",
 			Labels: map[string]string{"foo": "bar"},
+			CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 		},
 		Spec: api.ReplicationControllerSpec{
 			Replicas: 1,
@@ -291,8 +292,8 @@ func ExamplePrintReplicationControllerWithNamespace() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// NAMESPACE   CONTROLLER   CONTAINER(S)   IMAGE(S)    SELECTOR   REPLICAS
-	// beep        foo          foo            someimage   foo=bar    1
+	// NAMESPACE   CONTROLLER   CONTAINER(S)   IMAGE(S)    SELECTOR   REPLICAS   AGE
+	// beep        foo          foo            someimage   foo=bar    1          10y
 }
 
 func ExamplePrintPodWithWideFormat() {
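The expected output above pins the new AGE cell to `10y` because the fixture sets `CreationTimestamp` to `time.Now().AddDate(-10, 0, 0)`. The printers later in this diff call a `translateTimestamp` helper whose body is not part of the change; the following is only a hypothetical sketch of how a creation timestamp could be reduced to a compact single-unit age string such as `10y`, not kubectl's actual implementation:

package main

import (
	"fmt"
	"time"
)

// shortAge is a hypothetical stand-in for translateTimestamp: it renders the
// elapsed time since a creation timestamp in the compact single-unit style
// ("45s", "3m", "7h", "12d", "10y") that the AGE column in the examples expects.
func shortAge(created time.Time) string {
	d := time.Since(created)
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%ds", int(d.Seconds()))
	case d < time.Hour:
		return fmt.Sprintf("%dm", int(d.Minutes()))
	case d < 24*time.Hour:
		return fmt.Sprintf("%dh", int(d.Hours()))
	case d < 365*24*time.Hour:
		return fmt.Sprintf("%dd", int(d.Hours())/24)
	default:
		return fmt.Sprintf("%dy", int(d.Hours())/(24*365))
	}
}

func main() {
	created := time.Now().AddDate(-10, 0, 0) // same fixture the tests use
	fmt.Println(shortAge(created))           // 10y
}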
@@ -342,8 +343,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		Items: []api.Service{
 			{
 				ObjectMeta: api.ObjectMeta{
 					Name: "svc1",
 					Namespace: "ns1",
+					CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 					Labels: map[string]string{
 						"l1": "value",
 					},
@@ -362,8 +364,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 			},
 			{
 				ObjectMeta: api.ObjectMeta{
 					Name: "svc2",
 					Namespace: "ns2",
+					CreationTimestamp: util.Time{time.Now().AddDate(-10, 0, 0)},
 					Labels: map[string]string{
 						"l1": "dolla-bill-yall",
 					},
@@ -388,10 +391,10 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// |NAMESPACE   NAME   LABELS               SELECTOR   IP(S)      PORT(S)    L1|
-	// |ns1         svc1   l1=value             s=magic    10.1.1.1   53/UDP     value|
+	// |NAMESPACE   NAME   LABELS               SELECTOR   IP(S)      PORT(S)    AGE   L1|
+	// |ns1         svc1   l1=value             s=magic    10.1.1.1   53/UDP     10y   value|
 	// |                                                              53/TCP     |
-	// |ns2         svc2   l1=dolla-bill-yall   s=kazam    10.1.1.2   80/TCP     dolla-bill-yall|
+	// |ns2         svc2   l1=dolla-bill-yall   s=kazam    10.1.1.2   80/TCP     10y   dolla-bill-yall|
 	// |                                                              8080/TCP   |
 	// ||
 }
@@ -258,18 +258,18 @@ func (h *HumanReadablePrinter) HandledResources() []string {
 // pkg/kubectl/cmd/get.go to reflect the new resource type.
 var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
 var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
-var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS"}
-var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)"}
-var endpointColumns = []string{"NAME", "ENDPOINTS"}
-var nodeColumns = []string{"NAME", "LABELS", "STATUS"}
+var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS", "AGE"}
+var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)", "AGE"}
+var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
+var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
 var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
-var limitRangeColumns = []string{"NAME"}
-var resourceQuotaColumns = []string{"NAME"}
-var namespaceColumns = []string{"NAME", "LABELS", "STATUS"}
-var secretColumns = []string{"NAME", "TYPE", "DATA"}
-var serviceAccountColumns = []string{"NAME", "SECRETS"}
-var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON"}
-var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME"}
+var limitRangeColumns = []string{"NAME", "AGE"}
+var resourceQuotaColumns = []string{"NAME", "AGE"}
+var namespaceColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
+var secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"}
+var serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"}
+var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON", "AGE"}
+var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME", "AGE"}
 var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"}
 var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too.
 
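Each printer below emits its row as tab-separated cells (note the `\t` format strings), with the age appended as the final cell to match the `AGE` header added above. How those tabs become aligned columns is outside this diff; the sketch below assumes Go's text/tabwriter purely for illustration, using the new `serviceColumns` header and a data row borrowed from the example test earlier:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Header taken from the new serviceColumns definition above; the tabwriter
	// settings and the row below are illustrative assumptions, not kubectl's.
	serviceColumns := []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)", "AGE"}

	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, strings.Join(serviceColumns, "\t"))

	// A printService-style row: every cell tab-separated, with the age
	// string appended as the final column.
	fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\t%s\n",
		"svc1", "l1=value", "s=magic", "10.1.1.1", 53, "UDP", "10y")
	w.Flush()
}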
@@ -517,12 +517,13 @@ func printReplicationController(controller *api.ReplicationController, w io.Writ
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d",
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%s",
 		name,
 		firstContainer.Name,
 		firstContainer.Image,
 		formatLabels(controller.Spec.Selector),
 		controller.Spec.Replicas,
+		translateTimestamp(controller.CreationTimestamp),
 	); err != nil {
 		return err
 	}
@@ -574,8 +575,13 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool,
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s", name, formatLabels(svc.Labels),
-		formatLabels(svc.Spec.Selector), ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\t%s",
+		name,
+		formatLabels(svc.Labels),
+		formatLabels(svc.Spec.Selector),
+		ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol,
+		translateTimestamp(svc.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	if _, err := fmt.Fprint(w, appendLabels(svc.Labels, columnLabels)); err != nil {
@@ -629,7 +635,7 @@ func printEndpoints(endpoints *api.Endpoints, w io.Writer, withNamespace bool, w
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s", name, formatEndpoints(endpoints, nil)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, formatEndpoints(endpoints, nil), translateTimestamp(endpoints.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(endpoints.Labels, columnLabels))
@@ -649,7 +655,8 @@ func printNamespace(item *api.Namespace, w io.Writer, withNamespace bool, wide b
 	if withNamespace {
 		return fmt.Errorf("namespace is not namespaced")
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", item.Name, formatLabels(item.Labels), item.Status.Phase); err != nil {
+
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", item.Name, formatLabels(item.Labels), item.Status.Phase, translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -674,7 +681,7 @@ func printSecret(item *api.Secret, w io.Writer, withNamespace bool, wide bool, c
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%v", name, item.Type, len(item.Data)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%s", name, item.Type, len(item.Data), translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -700,7 +707,7 @@ func printServiceAccount(item *api.ServiceAccount, w io.Writer, withNamespace bo
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%d", name, len(item.Secrets)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%d\t%s", name, len(item.Secrets), translateTimestamp(item.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(item.Labels, columnLabels))
@@ -744,7 +751,7 @@ func printNode(node *api.Node, w io.Writer, withNamespace bool, wide bool, colum
 		status = append(status, "SchedulingDisabled")
 	}
 
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s", node.Name, formatLabels(node.Labels), strings.Join(status, ",")); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", node.Name, formatLabels(node.Labels), strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(node.Labels, columnLabels))
@@ -778,7 +785,15 @@ func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, withNamespace
 	aQty := pv.Spec.Capacity[api.ResourceStorage]
 	aSize := aQty.Value()
 
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s", name, formatLabels(pv.Labels), aSize, modesStr, pv.Status.Phase, claimRefUID, pv.Status.Reason); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s",
+		name,
+		formatLabels(pv.Labels),
+		aSize, modesStr,
+		pv.Status.Phase,
+		claimRefUID,
+		pv.Status.Reason,
+		translateTimestamp(pv.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(pv.Labels, columnLabels))
@@ -809,7 +824,8 @@ func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, wit
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, formatLabels(pvc.Labels), pvc.Status.Phase, pvc.Spec.VolumeName); err != nil {
+
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, pvc.Labels, pvc.Status.Phase, pvc.Spec.VolumeName, translateTimestamp(pvc.CreationTimestamp)); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(pvc.Labels, columnLabels))
@@ -871,7 +887,11 @@ func printLimitRange(limitRange *api.LimitRange, w io.Writer, withNamespace bool
 		}
 	}
 
-	if _, err := fmt.Fprintf(w, "%s", name); err != nil {
+	if _, err := fmt.Fprintf(
+		w, "%s\t%s",
+		name,
+		translateTimestamp(limitRange.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(limitRange.Labels, columnLabels))
@@ -898,7 +918,11 @@ func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, withNames
 		}
 	}
 
-	if _, err := fmt.Fprintf(w, "%s", name); err != nil {
+	if _, err := fmt.Fprintf(
+		w, "%s\t%s",
+		name,
+		translateTimestamp(resourceQuota.CreationTimestamp),
+	); err != nil {
 		return err
 	}
 	_, err := fmt.Fprint(w, appendLabels(resourceQuota.Labels, columnLabels))