Mirror of https://github.com/k3s-io/kubernetes.git — synced 2025-07-22 19:31:44 +00:00
Remove legacy metrics client from podautoscaler

parent e6136c0303
commit 4ebc0c94a4
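In short: the HPA controller previously chose between a REST-based metrics client and the legacy Heapster client, gated by the HorizontalPodAutoscalerUseRESTClients config field. This commit deletes the legacy path, the gating field, the k8s.io/heapster dependency, and the Heapster-based tests. Condensed from the controllermanager.go hunk below (a sketch of the net effect, not the full diff):

// Before: branch on the (long-deprecated) config field.
if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients {
	// use the new-style clients if support for custom metrics is enabled
	return startHPAControllerWithRESTClient(ctx)
}
return startHPAControllerWithLegacyClient(ctx)

// After: the REST client path is the only path.
return startHPAControllerWithRESTClient(ctx)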
LICENSES/vendor/k8s.io/heapster/LICENSE — 205 lines changed (generated, vendored)
@@ -1,205 +0,0 @@
= vendor/k8s.io/heapster licensed under: =

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

= vendor/k8s.io/heapster/LICENSE 136e4f49dbf29942c572a3a8f6e88a77
@@ -537,7 +537,6 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,H
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerSyncPeriod
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerTolerance
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerUpscaleForbiddenWindow
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerUseRESTClients
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,JobControllerConfiguration,ConcurrentJobSyncs
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,AttachDetachController
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,CSRSigningController
@@ -2124,12 +2124,6 @@ function start-kube-controller-manager {
  if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
    params+=("--cluster-signing-duration=$CLUSTER_SIGNING_DURATION")
  fi
  # Disable using HPA metrics REST clients if metrics-server isn't enabled,
  # or if we want to explicitly disable it by setting HPA_USE_REST_CLIENT.
  if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]] ||
     [[ "${HPA_USE_REST_CLIENTS:-}" == "false" ]]; then
    params+=("--horizontal-pod-autoscaler-use-rest-clients=false")
  fi
  if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
    params+=("--pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE")
    params+=("--pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE")
@@ -39,12 +39,7 @@ func startHPAController(ctx ControllerContext) (http.Handler, bool, error) {
		return nil, false, nil
	}

	if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients {
		// use the new-style clients if support for custom metrics is enabled
		return startHPAControllerWithRESTClient(ctx)
	}

	return startHPAControllerWithLegacyClient(ctx)
	return startHPAControllerWithRESTClient(ctx)
}

func startHPAControllerWithRESTClient(ctx ControllerContext) (http.Handler, bool, error) {
@@ -67,18 +62,6 @@ func startHPAControllerWithRESTClient(ctx ControllerContext) (http.Handler, bool
	return startHPAControllerWithMetricsClient(ctx, metricsClient)
}

func startHPAControllerWithLegacyClient(ctx ControllerContext) (http.Handler, bool, error) {
	hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
	metricsClient := metrics.NewHeapsterMetricsClient(
		hpaClient,
		metrics.DefaultHeapsterNamespace,
		metrics.DefaultHeapsterScheme,
		metrics.DefaultHeapsterService,
		metrics.DefaultHeapsterPort,
	)
	return startHPAControllerWithMetricsClient(ctx, metricsClient)
}

func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (http.Handler, bool, error) {
	hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
	hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
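For context, the surviving startHPAControllerWithRESTClient (its body is mostly elided in the hunk above) wires the three aggregated metrics APIs into a single MetricsClient. A reconstruction from the Kubernetes sources of this era — treat the exact package aliases and call signatures as assumptions, not as part of this diff:

// Assumed imports: resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1",
// custom_metrics "k8s.io/metrics/pkg/client/custom_metrics",
// external_metrics "k8s.io/metrics/pkg/client/external_metrics".
clientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
apiVersionsGetter := custom_metrics.NewAvailableAPIsGetter(hpaClient.Discovery())
metricsClient := metrics.NewRESTMetricsClient(
	resourceclient.NewForConfigOrDie(clientConfig), // resource metrics (metrics.k8s.io, e.g. metrics-server)
	custom_metrics.NewForConfig(clientConfig, ctx.RESTMapper, apiVersionsGetter), // custom.metrics.k8s.io
	external_metrics.NewForConfigOrDie(clientConfig), // external.metrics.k8s.io
)
return startHPAControllerWithMetricsClient(ctx, metricsClient)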
@@ -40,7 +40,6 @@ func (o *HPAControllerOptions) AddFlags(fs *pflag.FlagSet) {
	fs.DurationVar(&o.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", o.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.")
	fs.MarkDeprecated("horizontal-pod-autoscaler-downscale-delay", "This flag is currently no-op and will be deleted.")
	fs.Float64Var(&o.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", o.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.")
	fs.BoolVar(&o.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", o.HorizontalPodAutoscalerUseRESTClients, "If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.")
	fs.DurationVar(&o.HorizontalPodAutoscalerCPUInitializationPeriod.Duration, "horizontal-pod-autoscaler-cpu-initialization-period", o.HorizontalPodAutoscalerCPUInitializationPeriod.Duration, "The period after pod start when CPU samples might be skipped.")
	fs.MarkDeprecated("horizontal-pod-autoscaler-use-rest-clients", "Heapster is no longer supported as a source for Horizontal Pod Autoscaler metrics.")
	fs.DurationVar(&o.HorizontalPodAutoscalerInitialReadinessDelay.Duration, "horizontal-pod-autoscaler-initial-readiness-delay", o.HorizontalPodAutoscalerInitialReadinessDelay.Duration, "The period after pod start during which readiness changes will be treated as initial readiness.")
@@ -55,7 +54,6 @@ func (o *HPAControllerOptions) ApplyTo(cfg *poautosclerconfig.HPAControllerConfi
	cfg.HorizontalPodAutoscalerSyncPeriod = o.HorizontalPodAutoscalerSyncPeriod
	cfg.HorizontalPodAutoscalerDownscaleStabilizationWindow = o.HorizontalPodAutoscalerDownscaleStabilizationWindow
	cfg.HorizontalPodAutoscalerTolerance = o.HorizontalPodAutoscalerTolerance
	cfg.HorizontalPodAutoscalerUseRESTClients = o.HorizontalPodAutoscalerUseRESTClients
	cfg.HorizontalPodAutoscalerCPUInitializationPeriod = o.HorizontalPodAutoscalerCPUInitializationPeriod
	cfg.HorizontalPodAutoscalerInitialReadinessDelay = o.HorizontalPodAutoscalerInitialReadinessDelay
	cfg.HorizontalPodAutoscalerUpscaleForbiddenWindow = o.HorizontalPodAutoscalerUpscaleForbiddenWindow
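The tolerance flag above is the HPA dead zone: ignoring readiness corrections, the controller scales only when the usage/target ratio differs from 1.0 by more than the tolerance, and the desired count is ceil(currentReplicas × ratio). A worked sketch using the numbers from TestLegacyScaleUp further down (3 pods each requesting 1 CPU, reporting 300m/500m/700m usage against a 30% target):

// avg usage = (300+500+700)/3 = 500m of a 1000m request → 50% utilization.
avgUtilization := (300.0 + 500.0 + 700.0) / 3.0 / 1000.0 * 100.0 // = 50
ratio := avgUtilization / 30.0                                   // ≈ 1.67
if math.Abs(ratio-1.0) > 0.1 { // outside the 0.1 tolerance dead zone → scale
	desired := int32(math.Ceil(ratio * 3.0)) // = 5 replicas
	_ = desired                              // matches desiredReplicas: 5 in the test
}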
@@ -306,7 +306,6 @@ func TestAddFlags(t *testing.T) {
			HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
			HorizontalPodAutoscalerInitialReadinessDelay:   metav1.Duration{Duration: 50 * time.Second},
			HorizontalPodAutoscalerTolerance:               0.1,
			HorizontalPodAutoscalerUseRESTClients:          true,
		},
	},
	JobController: &JobControllerOptions{
@@ -561,7 +560,6 @@ func TestApplyTo(t *testing.T) {
			HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
			HorizontalPodAutoscalerInitialReadinessDelay:   metav1.Duration{Duration: 50 * time.Second},
			HorizontalPodAutoscalerTolerance:               0.1,
			HorizontalPodAutoscalerUseRESTClients:          true,
		},
	},
	JobController: jobconfig.JobControllerConfiguration{
		ConcurrentJobSyncs: 5,
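The MarkDeprecated calls above are the standard spf13/pflag pattern: a deprecated flag stays registered so old invocations keep parsing, but it is hidden from --help and pflag prints a warning when it is used. A minimal, self-contained sketch:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	useREST := fs.Bool("horizontal-pod-autoscaler-use-rest-clients", true, "no-op")
	// MarkDeprecated hides the flag from help output and makes pflag emit
	// "Flag --... has been deprecated, <message>" whenever the flag is set.
	if err := fs.MarkDeprecated("horizontal-pod-autoscaler-use-rest-clients",
		"Heapster is no longer supported as a source for Horizontal Pod Autoscaler metrics."); err != nil {
		panic(err) // only fails if the flag was never registered
	}
	_ = fs.Parse([]string{"--horizontal-pod-autoscaler-use-rest-clients=false"})
	fmt.Println(*useREST) // false, but after this commit the value drives nothing
}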
go.mod — 2 lines changed
@@ -121,7 +121,6 @@ require (
	k8s.io/cri-api v0.0.0
	k8s.io/csi-translation-lib v0.0.0
	k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
	k8s.io/heapster v1.2.0-beta.1
	k8s.io/klog/v2 v2.9.0
	k8s.io/kube-aggregator v0.0.0
	k8s.io/kube-controller-manager v0.0.0
@@ -492,7 +491,6 @@ replace (
	k8s.io/cri-api => ./staging/src/k8s.io/cri-api
	k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib
	k8s.io/gengo => k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
	k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
	k8s.io/klog/v2 => k8s.io/klog/v2 v2.9.0
	k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator
	k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager
go.sum — 2 lines changed
@@ -568,8 +568,6 @@ honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/heapster v1.2.0-beta.1 h1:lUsE/AHOMHpi3MLlBEkaU8Esxm5QhdyCrv1o7ot0s84=
k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
@@ -35,10 +35,6 @@ type HPAControllerConfiguration struct {
	// horizontalPodAutoscalerTolerance is the tolerance for when
	// resource usage suggests upscaling/downscaling
	HorizontalPodAutoscalerTolerance float64
	// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
	// through the kube-aggregator when enabled, instead of using the legacy metrics client
	// through the API server proxy.
	HorizontalPodAutoscalerUseRESTClients bool
	// HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples
	// might be skipped.
	HorizontalPodAutoscalerCPUInitializationPeriod metav1.Duration
@@ -21,7 +21,6 @@ import (

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
	utilpointer "k8s.io/utils/pointer"
)

// RecommendedDefaultHPAControllerConfiguration defaults a pointer to a
@@ -35,9 +34,6 @@ import (
// run it in your wrapper struct of this type in its `SetDefaults_` method.
func RecommendedDefaultHPAControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.HPAControllerConfiguration) {
	zero := metav1.Duration{}
	if obj.HorizontalPodAutoscalerUseRESTClients == nil {
		obj.HorizontalPodAutoscalerUseRESTClients = utilpointer.BoolPtr(true)
	}
	if obj.HorizontalPodAutoscalerSyncPeriod == zero {
		obj.HorizontalPodAutoscalerSyncPeriod = metav1.Duration{Duration: 15 * time.Second}
	}
@@ -86,9 +86,6 @@ func autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConf
	out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
	out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow
	out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
	if err := v1.Convert_Pointer_bool_To_bool(&in.HorizontalPodAutoscalerUseRESTClients, &out.HorizontalPodAutoscalerUseRESTClients, s); err != nil {
		return err
	}
	out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
	out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
	return nil
@@ -100,9 +97,6 @@ func autoConvert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConf
	out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow
	out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
	out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
	if err := v1.Convert_bool_To_Pointer_bool(&in.HorizontalPodAutoscalerUseRESTClients, &out.HorizontalPodAutoscalerUseRESTClients, s); err != nil {
		return err
	}
	out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
	out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
	return nil
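The deleted conversion calls relied on the pointer/value bool helpers from apimachinery's meta/v1 package: versioned configs use *bool so "unset" can be distinguished from false, while the internal type uses a plain bool. Their behavior, paraphrased as a sketch (signatures trimmed; the real functions also take a conversion.Scope):

func convertPointerBoolToBool(in **bool, out *bool) {
	if *in != nil {
		*out = **in // an explicit value in the versioned config wins
	} else {
		*out = false // unset collapses to the zero value (defaulting sets true earlier)
	}
}

func convertBoolToPointerBool(in *bool, out **bool) {
	temp := *in
	*out = &temp // copy so the pointer does not alias the internal struct
}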
@@ -1,802 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	restclient "k8s.io/client-go/rest"
	scalefake "k8s.io/client-go/scale/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"

	heapster "k8s.io/heapster/metrics/api/v1/types"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/apps/install"
	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
)

func (w fakeResponseWrapper) DoRaw(context.Context) ([]byte, error) {
	return w.raw, nil
}

func (w fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) {
	return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
	return fakeResponseWrapper{raw: raw}
}

type fakeResponseWrapper struct {
	raw []byte
}

type legacyTestCase struct {
	sync.Mutex
	minReplicas     int32
	maxReplicas     int32
	initialReplicas int32
	desiredReplicas int32

	// CPU target utilization as a percentage of the requested resources.
	CPUTarget            int32
	CPUCurrent           int32
	verifyCPUCurrent     bool
	reportedLevels       []uint64
	reportedCPURequests  []resource.Quantity
	reportedPodReadiness []v1.ConditionStatus
	scaleUpdated         bool
	statusUpdated        bool
	eventCreated         bool
	verifyEvents         bool
	useMetricsAPI        bool
	metricsTarget        []autoscalingv2.MetricSpec
	// Channel with names of HPA objects which we have reconciled.
	processed chan string

	// Target resource information.
	resource *fakeResource

	// Last scale time
	lastScaleTime   *metav1.Time
	recommendations []timestampedRecommendation

	finished bool
}

// Needs to be called under a lock.
func (tc *legacyTestCase) computeCPUCurrent() {
	if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 {
		return
	}
	reported := 0
	for _, r := range tc.reportedLevels {
		reported += int(r)
	}
	requested := 0
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = int32(100 * reported / requested)
}

func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *scalefake.FakeScaleClient) {
	namespace := "test-namespace"
	hpaName := "test-hpa"
	podNamePrefix := "test-pod"
	labelSet := map[string]string{"name": podNamePrefix}
	selector := labels.SelectorFromSet(labelSet).String()

	tc.Lock()

	tc.scaleUpdated = false
	tc.statusUpdated = false
	tc.eventCreated = false
	tc.processed = make(chan string, 100)
	if tc.CPUCurrent == 0 {
		tc.computeCPUCurrent()
	}

	if tc.resource == nil {
		tc.resource = &fakeResource{
			name:       "test-rc",
			apiVersion: "v1",
			kind:       "ReplicationController",
		}
	}
	tc.Unlock()

	fakeClient := &fake.Clientset{}
	fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv2.HorizontalPodAutoscalerList{
			Items: []autoscalingv2.HorizontalPodAutoscaler{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      hpaName,
						Namespace: namespace,
						SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
					},
					Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
						ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
							Kind:       tc.resource.kind,
							Name:       tc.resource.name,
							APIVersion: tc.resource.apiVersion,
						},
						MinReplicas: &tc.minReplicas,
						MaxReplicas: tc.maxReplicas,
					},
					Status: autoscalingv2.HorizontalPodAutoscalerStatus{
						CurrentReplicas: tc.initialReplicas,
						DesiredReplicas: tc.initialReplicas,
					},
				},
			},
		}

		if tc.CPUTarget > 0.0 {
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name:                     v1.ResourceCPU,
						TargetAverageUtilization: &tc.CPUTarget,
					},
				},
			}
		}
		if len(tc.metricsTarget) > 0 {
			obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
		}

		if len(obj.Items[0].Spec.Metrics) == 0 {
			// manually add in the defaulting logic
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
					},
				},
			}
		}

		// and... convert to autoscaling v1 to return the right type
		objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
		if err != nil {
			return true, nil, err
		}

		return true, objv1, nil
	})

	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &v1.PodList{}
		for i := 0; i < len(tc.reportedCPURequests); i++ {
			podReadiness := v1.ConditionTrue
			if tc.reportedPodReadiness != nil {
				podReadiness = tc.reportedPodReadiness[i]
			}
			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
			pod := v1.Pod{
				Status: v1.PodStatus{
					StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
					Phase:     v1.PodRunning,
					Conditions: []v1.PodCondition{
						{
							Type:   v1.PodReady,
							Status: podReadiness,
						},
					},
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
					Labels: map[string]string{
						"name": podNamePrefix,
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: tc.reportedCPURequests[i],
								},
							},
						},
					},
				},
			}
			obj.Items = append(obj.Items, pod)
		}
		return true, obj, nil
	})

	fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
		tc.Lock()
		defer tc.Unlock()

		var heapsterRawMemResponse []byte

		if tc.useMetricsAPI {
			metrics := metricsapi.PodMetricsList{}
			for i, cpu := range tc.reportedLevels {
				podMetric := metricsapi.PodMetrics{
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
					},
					Timestamp: metav1.Time{Time: time.Now()},
					Containers: []metricsapi.ContainerMetrics{
						{
							Name: "container",
							Usage: v1.ResourceList{
								v1.ResourceCPU: *resource.NewMilliQuantity(
									int64(cpu),
									resource.DecimalSI),
								v1.ResourceMemory: *resource.NewQuantity(
									int64(1024*1024),
									resource.BinarySI),
							},
						},
					},
				}
				metrics.Items = append(metrics.Items, podMetric)
			}
			heapsterRawMemResponse, _ = json.Marshal(&metrics)
		} else {
			// only return the pods that we actually asked for
			proxyAction := action.(core.ProxyGetAction)
			pathParts := strings.Split(proxyAction.GetPath(), "/")
			// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
			if len(pathParts) < 9 {
				return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
			}

			podNames := strings.Split(pathParts[7], ",")
			podPresent := make([]bool, len(tc.reportedLevels))
			for _, name := range podNames {
				if len(name) <= len(podNamePrefix)+1 {
					return true, nil, fmt.Errorf("unknown pod %q", name)
				}
				num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
				if err != nil {
					return true, nil, fmt.Errorf("unknown pod %q", name)
				}
				podPresent[num] = true
			}

			timestamp := time.Now()
			metrics := heapster.MetricResultList{}
			for i, level := range tc.reportedLevels {
				if !podPresent[i] {
					continue
				}

				metric := heapster.MetricResult{
					Metrics:         []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
					LatestTimestamp: timestamp,
				}
				metrics.Items = append(metrics.Items, metric)
			}
			heapsterRawMemResponse, _ = json.Marshal(&metrics)
		}

		return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
	})

	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := func() *autoscalingv1.HorizontalPodAutoscaler {
			tc.Lock()
			defer tc.Unlock()

			obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
			assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
			assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
			assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
			if tc.verifyCPUCurrent {
				if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
					assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
				}
			}
			tc.statusUpdated = true
			return obj
		}()
		// Every time we reconcile HPA object we are updating status.
		tc.processed <- obj.Name
		return true, obj, nil
	})

	fakeScaleClient := &scalefake.FakeScaleClient{}
	fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeWatch := watch.NewFake()
	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

	return fakeClient, fakeScaleClient
}

func (tc *legacyTestCase) verifyResults(t *testing.T) {
	tc.Lock()
	defer tc.Unlock()

	assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
	assert.True(t, tc.statusUpdated, "the status should have been updated")
	if tc.verifyEvents {
		assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
	}
}

func (tc *legacyTestCase) runTest(t *testing.T) {
	testClient, testScaleClient := tc.prepareTestClient(t)
	metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
	eventClient := &fake.Clientset{}
	eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		if tc.finished {
			return true, &v1.Event{}, nil
		}
		create, ok := action.(core.CreateAction)
		if !ok {
			return false, nil, nil
		}
		obj := create.GetObject().(*v1.Event)
		if tc.verifyEvents {
			switch obj.Reason {
			case "SuccessfulRescale":
				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.desiredReplicas), obj.Message)
			case "DesiredReplicasComputed":
				assert.Equal(t, fmt.Sprintf(
					"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
					tc.desiredReplicas,
					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
			default:
				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
			}
		}
		tc.eventCreated = true
		return true, obj, nil
	})

	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
	defaultDownscaleStabilisationWindow := 5 * time.Minute

	hpaController := NewHorizontalController(
		eventClient.CoreV1(),
		testScaleClient,
		testClient.AutoscalingV1(),
		testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
		metricsClient,
		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
		informerFactory.Core().V1().Pods(),
		controller.NoResyncPeriodFunc(),
		defaultDownscaleStabilisationWindow,
		defaultTestingTolerance,
		defaultTestingCPUInitializationPeriod,
		defaultTestingDelayOfInitialReadinessStatus,
	)
	hpaController.hpaListerSynced = alwaysReady

	if tc.recommendations != nil {
		hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
	}

	stop := make(chan struct{})
	defer close(stop)
	informerFactory.Start(stop)
	go hpaController.Run(stop)

	// Wait for HPA to be processed.
	<-tc.processed
	tc.Lock()
	tc.finished = true
	if tc.verifyEvents {
		tc.Unlock()
		// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
		time.Sleep(2 * time.Second)
	} else {
		tc.Unlock()
	}
	tc.verifyResults(t)

}

func TestLegacyScaleUp(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      4,
		CPUTarget:            30,
		verifyCPUCurrent:     false,
		reportedLevels:       []uint64{300, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		useMetricsAPI:        true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpUnreadyNoScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      3,
		CPUTarget:            30,
		CPUCurrent:           40,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{400, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		useMetricsAPI:        true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpDeployment(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		resource: &fakeResource{
			name:       "test-dep",
			apiVersion: "apps/v1",
			kind:       "Deployment",
		},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpReplicaSet(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		resource: &fakeResource{
			name:       "test-replicaset",
			apiVersion: "apps/v1",
			kind:       "ReplicaSet",
		},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCM(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 4,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:      []uint64{20, 10, 30},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCMUnreadyNoLessScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 6,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:       []uint64{50, 10, 30},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 6,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:       []uint64{50, 15, 30},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleDown(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     5,
		desiredReplicas:     3,
		CPUTarget:           50,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{100, 300, 500, 250, 250},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		recommendations:     []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func TestLegacyScaleDownCM(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 5,
		desiredReplicas: 3,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("20.0"),
				},
			},
		},
		reportedLevels:      []uint64{12, 12, 12, 12, 12},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		recommendations:     []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      5,
		desiredReplicas:      2,
		CPUTarget:            50,
		CPUCurrent:           30,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{100, 300, 500, 250, 250},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:        true,
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		recommendations:      []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func TestLegacySuperfluousMetrics(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     4,
		desiredReplicas:     6,
		CPUTarget:           100,
		reportedLevels:      []uint64{4000, 9500, 3000, 7000, 3200, 2000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpRCImmediately(t *testing.T) {
	time := metav1.Time{Time: time.Now()}
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     1,
		desiredReplicas:     2,
		verifyCPUCurrent:    false,
		reportedLevels:      []uint64{0, 0, 0, 0},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		lastScaleTime:       &time,
	}
	tc.runTest(t)
}

func TestLegacyScaleDownRCImmediately(t *testing.T) {
	time := metav1.Time{Time: time.Now()}
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         5,
		initialReplicas:     6,
		desiredReplicas:     5,
		CPUTarget:           50,
		reportedLevels:      []uint64{8000, 9500, 1000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
		useMetricsAPI:       true,
		lastScaleTime:       &time,
	}
	tc.runTest(t)
}

// TODO: add more tests
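Both deleted test files fake the same legacy transport: the Heapster metrics client fetched metrics through the API server's service proxy, with the pod names packed into the URL path, which is why the reactors above index pathParts[7]. A small sketch of that path shape (the concrete path and metric name are hypothetical; assumes import "strings"):

path := "/api/v1/model/namespaces/test-namespace/pod-list/test-pod-0,test-pod-1/metrics/cpu-usage"
parts := strings.Split(path, "/")
// parts: ["", "api", "v1", "model", "namespaces", "test-namespace",
//         "pod-list", "test-pod-0,test-pod-1", "metrics", "cpu-usage"]
// The leading empty element puts $PODS at index 7 and len(parts) at 10 ≥ 9,
// matching the bounds check in the reactors.
pods := strings.Split(parts[7], ",") // ["test-pod-0", "test-pod-1"]
_ = pods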
@ -1,670 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package podautoscaler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||
|
||||
heapster "k8s.io/heapster/metrics/api/v1/types"
|
||||
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type legacyReplicaCalcTestCase struct {
|
||||
currentReplicas int32
|
||||
expectedReplicas int32
|
||||
expectedError error
|
||||
|
||||
timestamp time.Time
|
||||
|
||||
resource *resourceInfo
|
||||
metric *metricInfo
|
||||
|
||||
podReadiness []v1.ConditionStatus
|
||||
}

func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {

    fakeClient := &fake.Clientset{}
    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := &v1.PodList{}
        for i := 0; i < int(tc.currentReplicas); i++ {
            podReadiness := v1.ConditionTrue
            if tc.podReadiness != nil {
                podReadiness = tc.podReadiness[i]
            }
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
            pod := v1.Pod{
                Status: v1.PodStatus{
                    Phase:     v1.PodRunning,
                    StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
                    Conditions: []v1.PodCondition{
                        {
                            Type:   v1.PodReady,
                            Status: podReadiness,
                        },
                    },
                },
                ObjectMeta: metav1.ObjectMeta{
                    Name:      podName,
                    Namespace: testNamespace,
                    Labels: map[string]string{
                        "name": podNamePrefix,
                    },
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{{}, {}},
                },
            }

            if tc.resource != nil && i < len(tc.resource.requests) {
                pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        tc.resource.name: tc.resource.requests[i],
                    },
                }
                pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
                    Requests: v1.ResourceList{
                        tc.resource.name: tc.resource.requests[i],
                    },
                }
            }
            obj.Items = append(obj.Items, pod)
        }
        return true, obj, nil
    })

    fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
        var heapsterRawMemResponse []byte

        if tc.resource != nil {
            metrics := metricsapi.PodMetricsList{}
            for i, resValue := range tc.resource.levels {
                podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
                if len(tc.resource.podNames) > i {
                    podName = tc.resource.podNames[i]
                }
                podMetric := metricsapi.PodMetrics{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      podName,
                        Namespace: testNamespace,
                    },
                    Timestamp:  metav1.Time{Time: tc.timestamp},
                    Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
                }
                for i, m := range resValue {
                    podMetric.Containers[i] = metricsapi.ContainerMetrics{
                        Name: fmt.Sprintf("container%v", i),
                        Usage: v1.ResourceList{
                            tc.resource.name: *resource.NewMilliQuantity(m, resource.DecimalSI),
                        },
                    }
                }
                metrics.Items = append(metrics.Items, podMetric)
            }
            heapsterRawMemResponse, _ = json.Marshal(&metrics)
        } else {
            // only return the pods that we actually asked for
            proxyAction := action.(core.ProxyGetAction)
            pathParts := strings.Split(proxyAction.GetPath(), "/")
            // pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
            if len(pathParts) < 9 {
                return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
            }

            podNames := strings.Split(pathParts[7], ",")
            podPresent := make([]bool, len(tc.metric.levels))
            for _, name := range podNames {
                if len(name) <= len(podNamePrefix)+1 {
                    return true, nil, fmt.Errorf("unknown pod %q", name)
                }
                num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
                if err != nil {
                    return true, nil, fmt.Errorf("unknown pod %q", name)
                }
                podPresent[num] = true
            }

            timestamp := tc.timestamp
            metrics := heapster.MetricResultList{}
            for i, level := range tc.metric.levels {
                if !podPresent[i] {
                    continue
                }

                floatVal := float64(tc.metric.levels[i]) / 1000.0
                metric := heapster.MetricResult{
                    Metrics:         []heapster.MetricPoint{{Timestamp: timestamp, Value: uint64(level), FloatValue: &floatVal}},
                    LatestTimestamp: timestamp,
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ = json.Marshal(&metrics)
        }

        return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
    })

    return fakeClient
}

func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
    testClient := tc.prepareTestClient(t)
    metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)

    informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
    informer := informerFactory.Core().V1().Pods()

    replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)

    stop := make(chan struct{})
    defer close(stop)
    informerFactory.Start(stop)
    if !cache.WaitForNamedCacheSync("HPA", stop, informer.Informer().HasSynced) {
        return
    }

    selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
        MatchLabels: map[string]string{"name": podNamePrefix},
    })
    if err != nil {
        require.Nil(t, err, "something went horribly wrong...")
    }

    if tc.resource != nil {
        outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, "")

        if tc.expectedError != nil {
            require.Error(t, err, "there should be an error calculating the replica count")
            assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
            return
        }
        require.NoError(t, err, "there should not have been an error calculating the replica count")
        assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
        assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
        assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
        assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")

    } else {
        outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector, nil)

        if tc.expectedError != nil {
            require.Error(t, err, "there should be an error calculating the replica count")
            assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
            return
        }
        require.NoError(t, err, "there should not have been an error calculating the replica count")
        assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
        assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
        assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
    }
}

func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 1,
        expectedError:   fmt.Errorf("no metrics returned matched known pods"),
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100),
            podNames: []string{"an-older-pod-name"},

            targetUtilization: 100,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUp(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 5,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(300, 500, 700),

            targetUtilization:   30,
            expectedUtilization: 50,
            expectedValue:       numContainersPerPod * 500,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(300, 500, 700),

            targetUtilization:   30,
            expectedUtilization: 60,
            expectedValue:       numContainersPerPod * 600,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(400, 500, 700),

            targetUtilization:   30,
            expectedUtilization: 40,
            expectedValue:       numContainersPerPod * 400,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUpCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{20000, 10000, 30000},
            targetUtilization:   15000,
            expectedUtilization: 20000,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUpCMUnreadyNoLessScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 6,
        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{50000, 10000, 30000},
            targetUtilization:   15000,
            expectedUtilization: 30000,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleUpCMUnreadyScale(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 7,
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{50000, 15000, 30000},
            targetUtilization:   15000,
            expectedUtilization: 31666,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleDown(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 3,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100, 300, 500, 250, 250),

            targetUtilization:   50,
            expectedUtilization: 28,
            expectedValue:       numContainersPerPod * 280,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleDownCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 3,
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{12000, 12000, 12000, 12000, 12000},
            targetUtilization:   20000,
            expectedUtilization: 12000,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  5,
        expectedReplicas: 2,
        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100, 300, 500, 250, 250),

            targetUtilization:   50,
            expectedUtilization: 30,
            expectedValue:       numContainersPerPod * 300,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcTolerance(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
            levels:   makePodMetricLevels(1010, 1030, 1020),

            targetUtilization:   100,
            expectedUtilization: 102,
            expectedValue:       numContainersPerPod * 1020,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcToleranceCM(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
        metric: &metricInfo{
            name:                "qps",
            levels:              []int64{20000, 21000, 21000},
            targetUtilization:   20000,
            expectedUtilization: 20666,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 24,
        resource: &resourceInfo{
            name:                v1.ResourceCPU,
            requests:            []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:              makePodMetricLevels(4000, 9500, 3000, 7000, 3200, 2000),
            targetUtilization:   100,
            expectedUtilization: 587,
            expectedValue:       numContainersPerPod * 5875,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 3,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(400, 95),

            targetUtilization:   100,
            expectedUtilization: 24,
            expectedValue:       495, // numContainersPerPod * 247, for sufficiently large values of 247
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 4,
        expectedError:   fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(),

            targetUtilization: 100,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas: 1,
        expectedError:   fmt.Errorf("missing request for"),
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{},
            levels:   makePodMetricLevels(200),

            targetUtilization: 100,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(1000),

            targetUtilization:   100,
            expectedUtilization: 100,
            expectedValue:       numContainersPerPod * 1000,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(1900),

            targetUtilization:   100,
            expectedUtilization: 190,
            expectedValue:       numContainersPerPod * 1900,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  2,
        expectedReplicas: 2,
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(600),

            targetUtilization:   100,
            expectedUtilization: 60,
            expectedValue:       numContainersPerPod * 600,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 3,
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100, 450),

            targetUtilization:   50,
            expectedUtilization: 45,
            expectedValue:       numContainersPerPod * 450,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  3,
        expectedReplicas: 4,
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100, 2000),

            targetUtilization:   50,
            expectedUtilization: 200,
            expectedValue:       numContainersPerPod * 2000,
        },
    }
    tc.runTest(t)
}

func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  4,
        expectedReplicas: 3,
        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
        resource: &resourceInfo{
            name:     v1.ResourceCPU,
            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
            levels:   makePodMetricLevels(100, 100, 100),

            targetUtilization:   50,
            expectedUtilization: 10,
            expectedValue:       numContainersPerPod * 100,
        },
    }
    tc.runTest(t)
}

// TestLegacyReplicaCalcComputedToleranceAlgImplementation is a regression test which
// back-calculates a minimal percentage for downscaling based on a small percentage
// increase in pod utilization which is calibrated against the tolerance value.
func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {

    startPods := int32(10)
    // 150 mCPU per pod.
    totalUsedCPUOfAllPods := int64(startPods * 150)
    // Each pod starts out asking for 2X what is really needed.
    // This means we will have a 50% ratio of used/requested.
    totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
    requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
    // Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
    perPodRequested := totalRequestedCPUOfAllPods / startPods

    // Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
    target := math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .01
    finalCPUPercentTarget := int32(target * 100)
    resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)

    // i.e. .60 * 20 -> scaled down expectation.
    finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))

    // To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue.
    tc := legacyReplicaCalcTestCase{
        currentReplicas:  startPods,
        expectedReplicas: finalPods,
        resource: &resourceInfo{
            name: v1.ResourceCPU,
            levels: makePodMetricLevels(
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
                totalUsedCPUOfAllPods/10,
            ),
            requests: []resource.Quantity{
                resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
                resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
            },

            targetUtilization:   finalCPUPercentTarget,
            expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
            expectedValue:       numContainersPerPod * totalUsedCPUOfAllPods / 10,
        },
    }

    tc.runTest(t)

    // Reuse the data structure above, now testing "unscaling".
    // Now, we test that no scaling happens if we are in a very close margin to the tolerance.
    target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
    finalCPUPercentTarget = int32(target * 100)
    tc.resource.targetUtilization = finalCPUPercentTarget
    tc.currentReplicas = startPods
    tc.expectedReplicas = startPods
    tc.runTest(t)
}

// TODO: add more tests
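
For reference, the scaling rule these cases pin down reduces to a small usage-ratio check. The following is a minimal standalone sketch (illustrative only, not the controller's code; it assumes the 0.1 tolerance that defaultTestingTolerance carries in these tests): scale only when the observed-to-target utilization ratio strays from 1.0 by more than the tolerance, then take the ceiling of the ratio times the current replica count.

package main

import (
    "fmt"
    "math"
)

// desiredReplicas sketches the usage-ratio rule exercised above: within
// tolerance nothing happens; otherwise the replica count scales with the
// ratio of observed to target utilization, rounded up.
func desiredReplicas(currentReplicas, currentUtilization, targetUtilization int32, tolerance float64) int32 {
    usageRatio := float64(currentUtilization) / float64(targetUtilization)
    if math.Abs(1.0-usageRatio) <= tolerance {
        return currentReplicas // within tolerance: no scaling event
    }
    return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}

func main() {
    // Mirrors TestLegacyReplicaCalcScaleUp: 50% observed vs. a 30% target over 3 pods.
    fmt.Println(desiredReplicas(3, 50, 30, 0.1)) // 5
    // Mirrors TestLegacyReplicaCalcTolerance: 102% vs. 100% stays inside the tolerance band.
    fmt.Println(desiredReplicas(3, 102, 100, 0.1)) // 3
}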

@@ -42,6 +42,14 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
+var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
+
+// timestamp is used for establishing order on metricPoints
+type metricPoint struct {
+	level     uint64
+	timestamp int
+}
+
 type restClientTestCase struct {
 	desiredMetricValues PodMetricsInfo
 	desiredError        error
@@ -424,3 +432,7 @@ func TestRESTClientContainerCPUEmptyMetricsForOnePod(t *testing.T) {
 	}
 	tc.runTest(t)
 }
+
+func offsetTimestampBy(t int) time.Time {
+	return fixedTimestamp.Add(time.Duration(t) * time.Minute)
+}

@@ -1,229 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "time"

    heapster "k8s.io/heapster/metrics/api/v1/types"
    "k8s.io/klog/v2"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    autoscaling "k8s.io/api/autoscaling/v2beta2"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
    DefaultHeapsterNamespace = "kube-system"
    DefaultHeapsterScheme    = "http"
    DefaultHeapsterService   = "heapster"
    DefaultHeapsterPort      = "" // use the first exposed port on the service
    heapsterDefaultMetricWindow = time.Minute
)

var heapsterQueryStart = -5 * time.Minute

type HeapsterMetricsClient struct {
    services        v1core.ServiceInterface
    podsGetter      v1core.PodsGetter
    heapsterScheme  string
    heapsterService string
    heapsterPort    string
}

func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
    return &HeapsterMetricsClient{
        services:        client.CoreV1().Services(namespace),
        podsGetter:      client.CoreV1(),
        heapsterScheme:  scheme,
        heapsterService: service,
        heapsterPort:    port,
    }
}

func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector, container string) (PodMetricsInfo, time.Time, error) {
    metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
    params := map[string]string{"labelSelector": selector.String()}

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
        DoRaw(context.TODO())
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
    }

    klog.V(8).Infof("Heapster metrics result: %s", string(resultRaw))

    metrics := metricsapi.PodMetricsList{}
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    if len(metrics.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
    }

    res := make(PodMetricsInfo, len(metrics.Items))

    for _, m := range metrics.Items {
        podSum := int64(0)
        missing := len(m.Containers) == 0
        for _, c := range m.Containers {
            if container == "" || container == c.Name {
                resValue, found := c.Usage[v1.ResourceName(resource)]
                if !found {
                    missing = true
                    klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                    continue
                }
                podSum += resValue.MilliValue()
            }
        }

        if !missing {
            res[m.Name] = PodMetric{
                Timestamp: m.Timestamp.Time,
                Window:    m.Window.Duration,
                Value:     int64(podSum),
            }
        }
    }

    timestamp := metrics.Items[0].Timestamp.Time

    return res, timestamp, nil
}
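
As a usage sketch under stated assumptions (exampleGetResourceMetric is a hypothetical helper, not part of this file; it assumes the k8s.io/client-go/kubernetes/fake and k8s.io/apimachinery/pkg/labels imports): the client is built with the defaults above and queried with a label selector. A bare fake clientset has no heapster proxy reactor wired up, so the call will not return real data unless one is added, as in the test fixtures below.

// exampleGetResourceMetric is a hypothetical illustration of driving the
// heapster client; with no proxy reactor registered the call yields an
// error or an empty result rather than pod metrics.
func exampleGetResourceMetric() {
    client := NewHeapsterMetricsClient(fake.NewSimpleClientset(),
        DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
    info, timestamp, err := client.GetResourceMetric(v1.ResourceCPU, "test-namespace", labels.Everything(), "")
    fmt.Println(info, timestamp, err)
}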

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
    podList, err := h.podsGetter.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
    }

    if len(podList.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
    }

    podNames := make([]string, len(podList.Items))
    for i, pod := range podList.Items {
        podNames[i] = pod.Name
    }

    now := time.Now()

    startTime := now.Add(heapsterQueryStart)
    metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
        namespace,
        strings.Join(podNames, ","),
        metricName)

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
        DoRaw(context.TODO())
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod metrics: %v", err)
    }

    var metrics heapster.MetricResultList
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    if len(metrics.Items) != len(podNames) {
        // if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
        // (note that Heapster returns *empty* metric items when a pod does not exist or have that metric, so this
        // does not cover the "missing metric entry" case)
        return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
    }

    var timestamp *time.Time
    res := make(PodMetricsInfo, len(metrics.Items))
    for i, podMetrics := range metrics.Items {
        val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
        if hadMetrics {
            res[podNames[i]] = PodMetric{
                Timestamp: podTimestamp,
                Window:    heapsterDefaultMetricWindow,
                Value:     int64(val),
            }

            if timestamp == nil || podTimestamp.Before(*timestamp) {
                timestamp = &podTimestamp
            }
        }
    }

    if timestamp == nil {
        timestamp = &time.Time{}
    }

    return res, *timestamp, nil
}
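
For illustration, a standalone sketch (with hypothetical namespace and pod names) of the model-API path that GetRawMetric builds before proxying to heapster:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Mirrors the fmt.Sprintf in GetRawMetric above.
    path := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
        "default", strings.Join([]string{"pod-0", "pod-1", "pod-2"}, ","), "qps")
    fmt.Println(path)
    // /api/v1/model/namespaces/default/pod-list/pod-0,pod-1,pod-2/metrics/qps
}

The comma-joined pod list is exactly what the legacy test's proxy reactor splits back out of pathParts[7].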

func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
    return 0, time.Time{}, fmt.Errorf("object metrics are not yet supported")
}

func (h *HeapsterMetricsClient) GetExternalMetric(metricName, namespace string, selector labels.Selector) ([]int64, time.Time, error) {
    return nil, time.Time{}, fmt.Errorf("external metrics aren't supported")
}

func collapseTimeSamples(metrics heapster.MetricResult, duration time.Duration) (int64, time.Time, bool) {
    floatSum := float64(0)
    intSum := int64(0)
    intSumCount := 0
    floatSumCount := 0

    var newest *heapster.MetricPoint // creation time of the newest sample for this pod
    for i, metricPoint := range metrics.Metrics {
        if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
            newest = &metrics.Metrics[i]
        }
    }
    if newest != nil {
        for _, metricPoint := range metrics.Metrics {
            if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
                intSum += int64(metricPoint.Value)
                intSumCount++
                if metricPoint.FloatValue != nil {
                    floatSum += *metricPoint.FloatValue
                    floatSumCount++
                }
            }
        }

        if newest.FloatValue != nil {
            return int64(floatSum / float64(floatSumCount) * 1000), newest.Timestamp, true
        } else {
            return (intSum * 1000) / int64(intSumCount), newest.Timestamp, true
        }
    }

    return 0, time.Time{}, false
}
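
A self-contained sketch of that windowing rule (standalone toy code covering only the integer branch, not the package function): samples whose timestamp plus the window falls strictly after the newest sample's timestamp are averaged, and the mean is scaled by 1000 into the milli-units the rest of the autoscaler works in.

package main

import (
    "fmt"
    "time"
)

type point struct {
    ts  time.Time
    val uint64
}

// collapse mirrors the integer path of collapseTimeSamples: pick the newest
// sample, average everything inside the trailing window, scale by 1000.
func collapse(points []point, window time.Duration) (int64, bool) {
    if len(points) == 0 {
        return 0, false
    }
    newest := points[0]
    for _, p := range points[1:] {
        if newest.ts.Before(p.ts) {
            newest = p
        }
    }
    var sum, count int64
    for _, p := range points {
        if p.ts.Add(window).After(newest.ts) {
            sum += int64(p.val)
            count++
        }
    }
    return (sum * 1000) / count, true
}

func main() {
    t0 := time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
    points := []point{
        {t0, 20},                       // t0 + 1m is not after t0+90s: excluded
        {t0.Add(30 * time.Second), 30}, // t0+30s + 1m equals t0+90s, not after: excluded
        {t0.Add(90 * time.Second), 40}, // the newest sample: included
    }
    val, ok := collapse(points, time.Minute)
    fmt.Println(val, ok) // 40000 true
}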

@@ -1,395 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "testing"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    restclient "k8s.io/client-go/rest"
    core "k8s.io/client-go/testing"

    heapster "k8s.io/heapster/metrics/api/v1/types"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    "github.com/stretchr/testify/assert"
)

var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)

func (w fakeResponseWrapper) DoRaw(context.Context) ([]byte, error) {
    return w.raw, nil
}

func (w fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) {
    return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
    return fakeResponseWrapper{raw: raw}
}

type fakeResponseWrapper struct {
    raw []byte
}

// timestamp is used for establishing order on metricPoints
type metricPoint struct {
    level     uint64
    timestamp int
}

type testCase struct {
    desiredMetricValues PodMetricsInfo
    desiredError        error

    replicas              int
    targetTimestamp       int
    window                time.Duration
    reportedMetricsPoints [][]metricPoint
    reportedPodMetrics    [][]int64

    namespace      string
    selector       labels.Selector
    metricSelector labels.Selector
    resourceName   v1.ResourceName
    metricName     string
}

func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
    namespace := "test-namespace"
    tc.namespace = namespace
    podNamePrefix := "test-pod"
    podLabels := map[string]string{"name": podNamePrefix}
    tc.selector = labels.SelectorFromSet(podLabels)

    // it's a resource test if we have a resource name
    isResource := len(tc.resourceName) > 0

    fakeClient := &fake.Clientset{}

    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := &v1.PodList{}
        for i := 0; i < tc.replicas; i++ {
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
            pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
            obj.Items = append(obj.Items, pod)
        }
        return true, obj, nil
    })

    if isResource {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := metricsapi.PodMetricsList{}
            for i, containers := range tc.reportedPodMetrics {
                metric := metricsapi.PodMetrics{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                        Namespace: namespace,
                    },
                    Timestamp:  metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
                    Window:     metav1.Duration{Duration: tc.window},
                    Containers: []metricsapi.ContainerMetrics{},
                }
                for j, cpu := range containers {
                    cm := metricsapi.ContainerMetrics{
                        Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
                        Usage: v1.ResourceList{
                            v1.ResourceCPU: *resource.NewMilliQuantity(
                                cpu,
                                resource.DecimalSI),
                            v1.ResourceMemory: *resource.NewQuantity(
                                int64(1024*1024),
                                resource.BinarySI),
                        },
                    }
                    metric.Containers = append(metric.Containers, cm)
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    } else {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := heapster.MetricResultList{}
            var latestTimestamp time.Time
            for _, reportedMetricPoints := range tc.reportedMetricsPoints {
                var heapsterMetricPoints []heapster.MetricPoint
                for _, reportedMetricPoint := range reportedMetricPoints {
                    timestamp := offsetTimestampBy(reportedMetricPoint.timestamp)
                    if latestTimestamp.Before(timestamp) {
                        latestTimestamp = timestamp
                    }
                    heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
                    heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
                }
                metric := heapster.MetricResult{
                    Metrics:         heapsterMetricPoints,
                    LatestTimestamp: latestTimestamp,
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    }

    return fakeClient
}

func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
    return v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: namespace,
            Labels:    podLabels,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            v1.ResourceCPU: resource.MustParse(request),
                        },
                    },
                },
            },
        },
        Status: v1.PodStatus{
            Phase: phase,
            Conditions: []v1.PodCondition{
                {
                    Type:   v1.PodReady,
                    Status: v1.ConditionTrue,
                },
            },
        },
    }
}

func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
    if tc.desiredError != nil {
        assert.Error(t, err, "there should be an error retrieving the metrics")
        assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
        return
    }
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")
    if len(metrics) != len(tc.desiredMetricValues) {
        t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
    } else {
        for k, m := range metrics {
            if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
                m.Window != tc.desiredMetricValues[k].Window ||
                m.Value != tc.desiredMetricValues[k].Value {
                t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
                break
            }
        }
    }

    targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

func (tc *testCase) runTest(t *testing.T) {
    testClient := tc.prepareTestClient(t)
    metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
    isResource := len(tc.resourceName) > 0
    if isResource {
        info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector, "")
        tc.verifyResults(t, info, timestamp, err)
    } else {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestCPU(t *testing.T) {
    targetTimestamp := 1
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
}

func TestQPS(t *testing.T) {
    targetTimestamp := 1
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
    }
    tc.runTest(t)
}

func TestQpsSumEqualZero(t *testing.T) {
    targetTimestamp := 0
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
    }
    tc.runTest(t)
}

func TestCPUMoreMetrics(t *testing.T) {
    targetTimestamp := 10
    window := 30 * time.Second
    tc := testCase{
        replicas: 5,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-3": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-4": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
    }
    tc.runTest(t)
}

func TestCPUMissingMetrics(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 4000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{4000}},
    }
    tc.runTest(t)
}

func TestQpsMissingMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
        metricName:            "qps",
        targetTimestamp:       1,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
    }
    tc.runTest(t)
}

func TestQpsSuperfluousMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
        metricName:            "qps",
        reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        resourceName:          v1.ResourceCPU,
        desiredError:          fmt.Errorf("no metrics returned from heapster"),
        reportedMetricsPoints: [][]metricPoint{},
        reportedPodMetrics:    [][]int64{},
    }
    tc.runTest(t)
}

func TestQpsEmptyEntries(t *testing.T) {
    targetTimestamp := 4
    tc := testCase{
        replicas:   3,
        metricName: "qps",
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 4000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 2000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUZeroReplicas(t *testing.T) {
    tc := testCase{
        replicas:           0,
        resourceName:       v1.ResourceCPU,
        desiredError:       fmt.Errorf("no metrics returned from heapster"),
        reportedPodMetrics: [][]int64{},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetricsForOnePod(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas:     3,
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
}

func offsetTimestampBy(t int) time.Time {
    return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}

@@ -328,10 +328,6 @@ type HPAControllerConfiguration struct {
 	// HorizontalPodAutoscalerTolerance is the tolerance for when
 	// resource usage suggests upscaling/downscaling
 	HorizontalPodAutoscalerTolerance float64
-	// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
-	// through the kube-aggregator when enabled, instead of using the legacy metrics client
-	// through the API server proxy.
-	HorizontalPodAutoscalerUseRESTClients *bool
 	// HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples
 	// might be skipped.
 	HorizontalPodAutoscalerCPUInitializationPeriod metav1.Duration
@@ -243,11 +243,6 @@ func (in *HPAControllerConfiguration) DeepCopyInto(out *HPAControllerConfigurati
 	out.HorizontalPodAutoscalerUpscaleForbiddenWindow = in.HorizontalPodAutoscalerUpscaleForbiddenWindow
 	out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
 	out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow
-	if in.HorizontalPodAutoscalerUseRESTClients != nil {
-		in, out := &in.HorizontalPodAutoscalerUseRESTClients, &out.HorizontalPodAutoscalerUseRESTClients
-		*out = new(bool)
-		**out = **in
-	}
 	out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
 	out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
 	return
@@ -295,7 +290,7 @@ func (in *KubeControllerManagerConfiguration) DeepCopyInto(out *KubeControllerMa
 	out.EndpointSliceController = in.EndpointSliceController
 	out.EndpointSliceMirroringController = in.EndpointSliceMirroringController
 	in.GarbageCollectorController.DeepCopyInto(&out.GarbageCollectorController)
-	in.HPAController.DeepCopyInto(&out.HPAController)
+	out.HPAController = in.HPAController
 	out.JobController = in.JobController
 	out.CronJobController = in.CronJobController
 	out.NamespaceController = in.NamespaceController

vendor/k8s.io/heapster/LICENSE (generated, vendored; 201 deletions)
@@ -1,201 +0,0 @@
[standard Apache License 2.0 text, removed along with the vendored heapster dependency]
49 vendor/k8s.io/heapster/metrics/api/v1/types/historical_types.go generated vendored
@ -1,49 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"time"
)

// MetricValue is either a floating point value or an unsigned integer value
type MetricValue struct {
	IntValue   *int64   `json:"intValue,omitempty"`
	FloatValue *float64 `json:"floatValue,omitempty"`
}

// MetricAggregationBucket holds information about various aggregations across a single bucket of time
type MetricAggregationBucket struct {
	Timestamp time.Time `json:"timestamp"`
	Count     *uint64   `json:"count,omitempty"`

	Average *MetricValue `json:"average,omitempty"`
	Maximum *MetricValue `json:"maximum,omitempty"`
	Minimum *MetricValue `json:"minimum,omitempty"`
	Median  *MetricValue `json:"median,omitempty"`

	Percentiles map[string]MetricValue `json:"percentiles,omitempty"`
}

// MetricAggregationResult holds a series of MetricAggregationBuckets of a particular size
type MetricAggregationResult struct {
	Buckets    []MetricAggregationBucket `json:"buckets"`
	BucketSize time.Duration             `json:"bucketSize"`
}

// MetricAggregationResultList is a list of MetricAggregationResults, each for a different object
type MetricAggregationResultList struct {
	Items []MetricAggregationResult `json:"items"`
}
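For orientation, these aggregation types are plain JSON-mapped structs: the heapster model API returned payloads that decode directly into them, which is how the legacy metrics client consumed them. Below is a minimal, self-contained sketch of decoding a MetricAggregationResult; the payload, the local type copies, and the main function are illustrative, not taken from this commit:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copies of the vendored heapster types above, so the sketch compiles on its own.
type MetricValue struct {
	IntValue   *int64   `json:"intValue,omitempty"`
	FloatValue *float64 `json:"floatValue,omitempty"`
}

type MetricAggregationBucket struct {
	Timestamp time.Time    `json:"timestamp"`
	Count     *uint64      `json:"count,omitempty"`
	Average   *MetricValue `json:"average,omitempty"`
}

type MetricAggregationResult struct {
	Buckets    []MetricAggregationBucket `json:"buckets"`
	BucketSize time.Duration             `json:"bucketSize"`
}

func main() {
	// Hypothetical response body: one bucket; bucketSize is in
	// nanoseconds because time.Duration marshals as an int64.
	payload := []byte(`{
		"bucketSize": 300000000000,
		"buckets": [{"timestamp": "2021-01-01T00:00:00Z", "count": 3, "average": {"intValue": 250}}]
	}`)

	var result MetricAggregationResult
	if err := json.Unmarshal(payload, &result); err != nil {
		panic(err)
	}
	for _, b := range result.Buckets {
		// Optional fields are pointers, so guard before dereferencing.
		if b.Average != nil && b.Average.IntValue != nil {
			fmt.Printf("%v avg=%d\n", b.Timestamp, *b.Average.IntValue)
		}
	}
}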
63 vendor/k8s.io/heapster/metrics/api/v1/types/model_types.go generated vendored
@ -1,63 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"time"
)

type MetricPoint struct {
	Timestamp time.Time `json:"timestamp"`
	Value     uint64    `json:"value"`
	// This will be populated only for float custom metrics. In that case
	// "value" will be zero. This is a temporary hack. Overall most likely
	// we will need a new api versioned in the similar way as K8S api.
	FloatValue *float64 `json:"floatValue,omitempty"`
}

type MetricResult struct {
	Metrics         []MetricPoint `json:"metrics"`
	LatestTimestamp time.Time     `json:"latestTimestamp"`
}

type MetricResultList struct {
	Items []MetricResult `json:"items"`
}

type Stats struct {
	Average     uint64 `json:"average"`
	NinetyFifth uint64 `json:"percentile"`
	Max         uint64 `json:"max"`
}

type ExternalStatBundle struct {
	Minute Stats `json:"minute"`
	Hour   Stats `json:"hour"`
	Day    Stats `json:"day"`
}

type StatsResponse struct {
	// Uptime is in seconds
	Uptime uint64                        `json:"uptime"`
	Stats  map[string]ExternalStatBundle `json:"stats"`
}

// An ExternalEntityListEntry represents the latest CPU and Memory usage of a model entity.
// A model entity can be a Pod, a Container, a Namespace or a Node.
type ExternalEntityListEntry struct {
	Name     string `json:"name"`
	CPUUsage uint64 `json:"cpuUsage"`
	MemUsage uint64 `json:"memUsage"`
}
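The comment on MetricPoint above documents a dual encoding: unsigned metrics arrive in Value, while float custom metrics set FloatValue and leave Value at zero. Assuming the MetricPoint type from the deleted file, a reader-side helper might look like this (an illustrative sketch, not code from this repository):

// pointValue collapses heapster's dual encoding into a single float64:
// if FloatValue is set this is a float custom metric and "value" is zero,
// so the pointer wins; otherwise the uint64 Value is authoritative.
func pointValue(p MetricPoint) float64 {
	if p.FloatValue != nil {
		return *p.FloatValue
	}
	return float64(p.Value)
}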
83 vendor/k8s.io/heapster/metrics/api/v1/types/types.go generated vendored
@ -1,83 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"time"
)

// Timeseries represents a set of metrics for the same target object
// (typically a container).
type Timeseries struct {
	// Map of metric names to their values.
	Metrics map[string][]Point `json:"metrics"`

	// Common labels for all metrics.
	Labels map[string]string `json:"labels,omitempty"`
}

// Point represents a metric value.
type Point struct {
	// The start and end time for which this data is representative.
	Start time.Time `json:"start"`
	End   time.Time `json:"end"`

	// Labels specific to this data point.
	Labels map[string]string `json:"labels,omitempty"`

	// The value of the metric.
	Value interface{} `json:"value"`
}

// TimeseriesSchema represents all the metrics and labels.
type TimeseriesSchema struct {
	// All the metrics handled by heapster.
	Metrics []MetricDescriptor `json:"metrics,omitempty"`
	// Labels that are common to all metrics.
	CommonLabels []LabelDescriptor `json:"common_labels,omitempty"`
	// Labels that are present only for containers in pods.
	// A container metric belongs to a pod if the "pod_name" label is set.
	PodLabels []LabelDescriptor `json:"pod_labels,omitempty"`
}

// To maintain stable api for GKE.

type MetricDescriptor struct {
	// The unique name of the metric.
	Name string `json:"name,omitempty"`

	// Description of the metric.
	Description string `json:"description,omitempty"`

	// Descriptor of the labels specific to this metric.
	Labels []LabelDescriptor `json:"labels,omitempty"`

	// Type and value of metric data.
	Type string `json:"type,omitempty"`

	// The type of value returned as part of this metric.
	ValueType string `json:"value_type,omitempty"`

	// The units of the value returned as part of this metric.
	Units string `json:"units,omitempty"`
}

type LabelDescriptor struct {
	// Key to use for the label.
	Key string `json:"key,omitempty"`

	// Description of the label.
	Description string `json:"description,omitempty"`
}
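The pod_labels comment above encodes a convention rather than a type: a container timeseries belongs to a pod exactly when its "pod_name" label is set. Under that assumption, and using the Timeseries type from the deleted file, the check is a one-liner; this helper is illustrative only:

// belongsToPod applies the schema convention noted above: container
// metrics that are part of a pod carry a non-empty "pod_name" label.
func belongsToPod(ts Timeseries) bool {
	name, ok := ts.Labels["pod_name"]
	return ok && name != ""
}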
4 vendor/modules.txt vendored
@ -1956,9 +1956,6 @@ k8s.io/gengo/generator
 k8s.io/gengo/namer
 k8s.io/gengo/parser
 k8s.io/gengo/types
-# k8s.io/heapster v1.2.0-beta.1 => k8s.io/heapster v1.2.0-beta.1
-## explicit
-k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog/v2 v2.9.0 => k8s.io/klog/v2 v2.9.0
 ## explicit
 k8s.io/klog/v2
@ -2676,7 +2673,6 @@ sigs.k8s.io/yaml
 # k8s.io/cri-api => ./staging/src/k8s.io/cri-api
 # k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib
 # k8s.io/gengo => k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
-# k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
 # k8s.io/klog/v2 => k8s.io/klog/v2 v2.9.0
 # k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator
 # k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager
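Note that vendor/modules.txt is generated rather than hand-edited: once the last k8s.io/heapster import is gone from the tree, re-running the vendoring workflow drops both the package listing and the replace marker shown above. In a plain Go module that is roughly `go mod tidy` followed by `go mod vendor`; Kubernetes wraps these steps in its own update-vendor script, so the exact invocation in this repository may differ.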