Kubectl describe for experimental HorizontalPodAutoscaler

This commit is contained in:
Marcin Wielgus 2015-08-26 16:17:18 +02:00
parent 1a8b400c3e
commit 55cd0d565e
9 changed files with 145 additions and 37 deletions

View File

@ -171,13 +171,13 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
if desiredReplicas != count {
// Going down
if desiredReplicas < count && (hpa.Status.LastScaleTimestamp == nil ||
if desiredReplicas < count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
rescale = true
}
// Going up
if desiredReplicas > count && (hpa.Status.LastScaleTimestamp == nil ||
if desiredReplicas > count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
rescale = true
}
@ -192,11 +192,12 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
}
}
hpa.Status = expapi.HorizontalPodAutoscalerStatus{
status := expapi.HorizontalPodAutoscalerStatus{
CurrentReplicas: count,
DesiredReplicas: desiredReplicas,
CurrentConsumption: currentConsumption,
CurrentConsumption: &currentConsumption,
}
hpa.Status = &status
if rescale {
now := util.NewTime(now)
hpa.Status.LastScaleTimestamp = &now

View File

@ -165,6 +165,10 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
LatestTimestamp: timestamp,
}}}
status := expapi.HorizontalPodAutoscalerStatus{
CurrentReplicas: 1,
DesiredReplicas: 3,
}
updateHpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscaler{
ObjectMeta: api.ObjectMeta{
@ -182,10 +186,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
MaxCount: 5,
Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")},
},
Status: expapi.HorizontalPodAutoscalerStatus{
CurrentReplicas: 1,
DesiredReplicas: 3,
},
Status: &status,
}}
heapsterRawResponse, _ := json.Marshal(&metrics)

View File

@ -787,8 +787,13 @@ func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Ho
if err := deepCopy_expapi_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_expapi_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
return err
if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus)
if err := deepCopy_expapi_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
return err
}
} else {
out.Status = nil
}
return nil
}
@ -833,8 +838,13 @@ func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec,
func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if err := deepCopy_expapi_ResourceConsumption(in.CurrentConsumption, &out.CurrentConsumption, c); err != nil {
return err
if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption)
if err := deepCopy_expapi_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
return err
}
} else {
out.CurrentConsumption = nil
}
if in.LastScaleTimestamp != nil {
out.LastScaleTimestamp = new(util.Time)

View File

@ -108,7 +108,7 @@ type HorizontalPodAutoscalerStatus struct {
// CurrentConsumption is the current average consumption of the given resource that the autoscaler will
// try to maintain by adjusting the desired number of pods.
// Two types of resources are supported: "cpu" and "memory".
CurrentConsumption ResourceConsumption `json:"currentConsumption"`
CurrentConsumption *ResourceConsumption `json:"currentConsumption"`
// LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods.
// This is used by the autoscaler to control how often the number of pods is changed.
@ -124,7 +124,7 @@ type HorizontalPodAutoscaler struct {
Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"`
// Status represents the current information about the autoscaler.
Status HorizontalPodAutoscalerStatus `json:"status,omitempty"`
Status *HorizontalPodAutoscalerStatus `json:"status,omitempty"`
}
// HorizontalPodAutoscaler is a collection of pod autoscalers.

View File

@ -1513,8 +1513,13 @@ func convert_expapi_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *ex
if err := convert_expapi_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus)
if err := convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in.Status, out.Status, s); err != nil {
return err
}
} else {
out.Status = nil
}
return nil
}
@ -1568,8 +1573,13 @@ func convert_expapi_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerS
}
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if err := convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(&in.CurrentConsumption, &out.CurrentConsumption, s); err != nil {
return err
if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption)
if err := convert_expapi_ResourceConsumption_To_v1_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil {
return err
}
} else {
out.CurrentConsumption = nil
}
if in.LastScaleTimestamp != nil {
if err := s.Convert(&in.LastScaleTimestamp, &out.LastScaleTimestamp, 0); err != nil {
@ -1845,8 +1855,13 @@ func convert_v1_HorizontalPodAutoscaler_To_expapi_HorizontalPodAutoscaler(in *Ho
if err := convert_v1_HorizontalPodAutoscalerSpec_To_expapi_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
if in.Status != nil {
out.Status = new(expapi.HorizontalPodAutoscalerStatus)
if err := convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerStatus(in.Status, out.Status, s); err != nil {
return err
}
} else {
out.Status = nil
}
return nil
}
@ -1900,8 +1915,13 @@ func convert_v1_HorizontalPodAutoscalerStatus_To_expapi_HorizontalPodAutoscalerS
}
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if err := convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(&in.CurrentConsumption, &out.CurrentConsumption, s); err != nil {
return err
if in.CurrentConsumption != nil {
out.CurrentConsumption = new(expapi.ResourceConsumption)
if err := convert_v1_ResourceConsumption_To_expapi_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil {
return err
}
} else {
out.CurrentConsumption = nil
}
if in.LastScaleTimestamp != nil {
if err := s.Convert(&in.LastScaleTimestamp, &out.LastScaleTimestamp, 0); err != nil {

View File

@ -789,8 +789,13 @@ func deepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Horizo
if err := deepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
return err
if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus)
if err := deepCopy_v1_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
return err
}
} else {
out.Status = nil
}
return nil
}
@ -835,8 +840,13 @@ func deepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out
func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if err := deepCopy_v1_ResourceConsumption(in.CurrentConsumption, &out.CurrentConsumption, c); err != nil {
return err
if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption)
if err := deepCopy_v1_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
return err
}
} else {
out.CurrentConsumption = nil
}
if in.LastScaleTimestamp != nil {
out.LastScaleTimestamp = new(util.Time)

View File

@ -94,7 +94,7 @@ type HorizontalPodAutoscalerStatus struct {
// CurrentConsumption is the current average consumption of the given resource that the autoscaler will
// try to maintain by adjusting the desired number of pods.
// Two types of resources are supported: "cpu" and "memory".
CurrentConsumption ResourceConsumption `json:"currentConsumption" description:"current resource consumption"`
CurrentConsumption *ResourceConsumption `json:"currentConsumption" description:"current resource consumption"`
// LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods.
// This is used by the autoscaler to control how often the number of pods is changed.
@ -110,7 +110,7 @@ type HorizontalPodAutoscaler struct {
Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" description:"specification of the desired behavior of the autoscaler; http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status"`
// Status represents the current information about the autoscaler.
Status HorizontalPodAutoscalerStatus `json:"status,omitempty"`
Status *HorizontalPodAutoscalerStatus `json:"status,omitempty"`
}
// HorizontalPodAutoscaler is a collection of pod autoscalers.

View File

@ -113,15 +113,24 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
expClients := NewExperimentalClientCache(clientConfig)
noClientErr := errors.New("could not get client")
getBothClients := func(group string, version string) (client *client.Client, expClient *client.ExperimentalClient, err error) {
err = noClientErr
getBothClients := func(group string, version string) (*client.Client, *client.ExperimentalClient, error) {
switch group {
case "api":
client, err = clients.ClientForVersion(version)
client, err := clients.ClientForVersion(version)
return client, nil, err
case "experimental":
expClient, err = expClients.Client()
client, err := clients.ClientForVersion(version)
if err != nil {
return nil, nil, err
}
expClient, err := expClients.Client()
if err != nil {
return nil, nil, err
}
return client, expClient, err
}
return
return nil, nil, noClientErr
}
return &Factory{
clients: clients,

View File

@ -82,8 +82,13 @@ func describerMap(c *client.Client) map[string]Describer {
return m
}
func expDescriberMap(c *client.ExperimentalClient) map[string]Describer {
return map[string]Describer{}
// expDescriberMap returns the describers for experimental-API resource kinds.
// Both clients are captured: the experimental client fetches the HPA object
// itself, while the core client is used by the describer to look up the
// referenced ReplicationController.
func expDescriberMap(c *client.Client, exp *client.ExperimentalClient) map[string]Describer {
	return map[string]Describer{
		"HorizontalPodAutoscaler": &HorizontalPodAutoscalerDescriber{
			client:       c,
			experimental: exp,
		},
	}
}
// List of all resource types we can describe
@ -102,10 +107,12 @@ func DescribableResources() []string {
func DescriberFor(kind string, c *client.Client, ec *client.ExperimentalClient) (Describer, bool) {
var f Describer
var ok bool
if c != nil {
f, ok = describerMap(c)[kind]
} else if ec != nil {
f, ok = expDescriberMap(ec)[kind]
}
if !ok && c != nil && ec != nil {
f, ok = expDescriberMap(c, ec)[kind]
}
return f, ok
}
@ -1104,6 +1111,56 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
})
}
// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler.
type HorizontalPodAutoscalerDescriber struct {
	// client is the core API client; used to fetch the ReplicationController
	// referenced by the autoscaler's ScaleRef.
	client *client.Client
	// experimental is the experimental API client; used to fetch the
	// HorizontalPodAutoscaler object itself.
	experimental *client.ExperimentalClient
}
// Describe fetches the named HorizontalPodAutoscaler in the given namespace
// via the experimental client and renders a tab-aligned, human-readable
// summary of its metadata, target/current resource consumption, pod-count
// bounds, and (for ReplicationController targets) current vs. desired
// replica counts.
func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (string, error) {
	hpa, err := d.experimental.HorizontalPodAutoscalers(namespace).Get(name)
	if err != nil {
		return "", err
	}
	return tabbedString(func(out io.Writer) error {
		fmt.Fprintf(out, "Name:\t%s\n", hpa.Name)
		fmt.Fprintf(out, "Namespace:\t%s\n", hpa.Namespace)
		fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(hpa.Labels))
		fmt.Fprintf(out, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
		fmt.Fprintf(out, "Reference:\t%s/%s/%s/%s\n",
			hpa.Spec.ScaleRef.Kind,
			hpa.Spec.ScaleRef.Namespace,
			hpa.Spec.ScaleRef.Name,
			hpa.Spec.ScaleRef.Subresource)
		fmt.Fprintf(out, "Target resource consumption:\t%s %s\n",
			hpa.Spec.Target.Quantity.String(),
			hpa.Spec.Target.Resource)
		fmt.Fprintf(out, "Current resource consumption:\t")
		// Status and CurrentConsumption are pointers and may be nil (e.g. the
		// autoscaler has not reconciled yet), so guard before dereferencing.
		if hpa.Status != nil && hpa.Status.CurrentConsumption != nil {
			fmt.Fprintf(out, "%s %s\n",
				hpa.Status.CurrentConsumption.Quantity.String(),
				hpa.Status.CurrentConsumption.Resource)
		} else {
			fmt.Fprintf(out, "<not available>\n")
		}
		fmt.Fprintf(out, "Min pods:\t%d\n", hpa.Spec.MinCount)
		fmt.Fprintf(out, "Max pods:\t%d\n", hpa.Spec.MaxCount)
		// TODO: switch to scale subresource once the required code is submitted.
		if strings.ToLower(hpa.Spec.ScaleRef.Kind) == "replicationcontroller" {
			fmt.Fprintf(out, "ReplicationController pods:\t")
			rc, err := d.client.ReplicationControllers(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Name)
			if err == nil {
				fmt.Fprintf(out, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas)
			} else {
				// Best-effort lookup: report the failure inline rather than
				// aborting the whole description.
				fmt.Fprintf(out, "failed to check Replication Controller\n")
			}
		}
		return nil
	})
}
func filterNonRunningPods(pods []*api.Pod) []*api.Pod {
if len(pods) == 0 {
return pods