From a9593d634c6a053848413e600dadbf974627515f Mon Sep 17 00:00:00 2001
From: Davanum Srinivas
Date: Tue, 19 Jul 2022 20:54:13 -0400
Subject: [PATCH] Generate and format files

- Run hack/update-codegen.sh
- Run hack/update-generated-device-plugin.sh
- Run hack/update-generated-protobuf.sh
- Run hack/update-generated-runtime.sh
- Run hack/update-generated-swagger-docs.sh
- Run hack/update-openapi-spec.sh
- Run hack/update-gofmt.sh

Signed-off-by: Davanum Srinivas
---
 api/openapi-spec/swagger.json | 16 +-
 api/openapi-spec/v3/api__v1_openapi.json | 10 +-
 ...issionregistration.k8s.io__v1_openapi.json | 2 +-
 ...pis__apiextensions.k8s.io__v1_openapi.json | 2 +-
 .../v3/apis__apps__v1_openapi.json | 6 +-
 .../v3/apis__autoscaling__v1_openapi.json | 2 +-
 .../v3/apis__autoscaling__v2_openapi.json | 4 +-
 .../apis__autoscaling__v2beta2_openapi.json | 4 +-
 .../v3/apis__batch__v1_openapi.json | 4 +-
 ...apis__certificates.k8s.io__v1_openapi.json | 2 +-
 ...apis__coordination.k8s.io__v1_openapi.json | 2 +-
 .../apis__discovery.k8s.io__v1_openapi.json | 2 +-
 .../v3/apis__events.k8s.io__v1_openapi.json | 2 +-
 ...rol.apiserver.k8s.io__v1beta1_openapi.json | 4 +-
 ...rol.apiserver.k8s.io__v1beta2_openapi.json | 4 +-
 ...al.apiserver.k8s.io__v1alpha1_openapi.json | 2 +-
 .../apis__networking.k8s.io__v1_openapi.json | 2 +-
 .../v3/apis__node.k8s.io__v1_openapi.json | 4 +-
 .../v3/apis__policy__v1_openapi.json | 2 +-
 ...rbac.authorization.k8s.io__v1_openapi.json | 2 +-
 .../apis__scheduling.k8s.io__v1_openapi.json | 2 +-
 .../v3/apis__storage.k8s.io__v1_openapi.json | 4 +-
 ...apis__storage.k8s.io__v1beta1_openapi.json | 4 +-
 cmd/kube-apiserver/app/testing/testserver.go | 5 +-
 cmd/kube-controller-manager/app/apps.go | 1 -
 .../app/autoscaling.go | 1 -
 cmd/kube-controller-manager/app/batch.go | 1 -
 .../app/certificates.go | 1 -
 .../app/controllermanager.go | 7 +-
 cmd/kube-controller-manager/app/core.go | 1 -
 cmd/kube-controller-manager/app/discovery.go | 1 -
 .../app/options/options.go | 1 -
 cmd/kube-controller-manager/app/policy.go | 1 -
 .../app/testing/testserver.go | 5 +-
 cmd/kube-scheduler/app/testing/testserver.go | 5 +-
 cmd/kubeadm/app/apis/kubeadm/v1beta2/doc.go | 297 ++++++++--------
 cmd/kubeadm/app/apis/kubeadm/v1beta3/doc.go | 322 +++++++++---------
 .../kubeadm/validation/validation_test.go | 2 +-
 cmd/kubeadm/app/cmd/init.go | 3 +-
 cmd/kubeadm/app/cmd/join.go | 3 +-
 cmd/kubeadm/app/cmd/util/join.go | 6 +-
 cmd/kubeadm/app/phases/copycerts/copycerts.go | 4 +-
 cmd/kubeadm/app/phases/kubelet/flags.go | 4 +-
 cmd/kubeadm/app/util/config/cluster.go | 3 +-
 cmd/kubeadm/app/util/version.go | 13 +-
 cmd/kubelet/app/options/options.go | 7 +-
 cmd/kubelet/app/server.go | 8 +-
 pkg/apis/apps/types.go | 5 +-
 pkg/apis/autoscaling/helpers.go | 18 +-
 pkg/apis/certificates/types.go | 3 +-
 pkg/apis/core/types.go | 43 +--
 pkg/apis/core/validation/validation.go | 5 +-
 pkg/apis/flowcontrol/types.go | 4 +-
 pkg/apis/flowcontrol/validation/validation.go | 8 +-
 pkg/apis/policy/validation/validation.go | 5 +-
 .../certificates/authority/policies.go | 22 +-
 pkg/controller/controller_ref_manager.go | 31 +-
 pkg/controller/controller_utils.go | 36 +-
 pkg/controller/cronjob/utils.go | 4 +-
 pkg/controller/daemon/daemon_controller.go | 4 +-
 pkg/controller/deployment/sync.go | 8 +-
 .../deployment/util/deployment_util.go | 9 +-
 .../deployment/util/deployment_util_test.go | 2 +-
 pkg/controller/disruption/disruption_test.go | 2 +-
 pkg/controller/endpointslice/reconciler.go | 14 +-
 .../endpointslice/topologycache/sliceinfo.go | 10 +-
 pkg/controller/job/job_controller.go | 24 +-
 .../deletion/namespaced_resources_deleter.go | 16 +-
 pkg/controller/nodeipam/ipam/doc.go | 20 +-
 .../node_lifecycle_controller.go | 18 +-
 .../podautoscaler/horizontal_test.go | 15 +-
 pkg/controller/statefulset/stateful_set.go | 3 +-
 .../statefulset/stateful_set_utils.go | 2 +-
 .../endpointslice/endpointslice_tracker.go | 10 +-
 .../attachdetach/attach_detach_controller.go | 5 +-
 .../volume/attachdetach/metrics/metrics.go | 9 +-
 .../desired_state_of_world_populator.go | 8 +-
 .../volume/persistentvolume/binder_test.go | 31 +-
 .../volume/persistentvolume/delete_test.go | 23 +-
 .../volume/persistentvolume/framework_test.go | 64 ++--
 .../volume/persistentvolume/index.go | 26 +-
 .../volume/persistentvolume/provision_test.go | 23 +-
 .../volume/persistentvolume/pv_controller.go | 16 +-
 .../volume/persistentvolume/recycle_test.go | 23 +-
 .../persistentvolume/testing/testing.go | 30 +-
 pkg/controlplane/instance.go | 3 +-
 pkg/controlplane/reconcilers/instancecount.go | 18 +-
 pkg/controlplane/reconcilers/lease.go | 6 +-
 pkg/credentialprovider/config.go | 6 +-
 pkg/credentialprovider/gcp/metadata.go | 5 +-
 pkg/credentialprovider/keyring.go | 11 +-
 pkg/credentialprovider/plugin/plugin.go | 2 +-
 pkg/credentialprovider/plugins.go | 7 +-
 pkg/generated/openapi/zz_generated.openapi.go | 26 +-
 pkg/kubeapiserver/options/admission.go | 11 +-
 pkg/kubelet/cm/container_manager_linux.go | 1 -
 pkg/kubelet/cm/cpumanager/cpu_assignment.go | 42 +--
 pkg/kubelet/cm/cpumanager/policy_static.go | 30 +-
 .../cm/node_container_manager_linux.go | 2 +-
 .../topologymanager/fake_topology_manager.go | 2 +-
 pkg/kubelet/cm/topologymanager/policy.go | 25 +-
 .../cm/topologymanager/topology_manager.go | 2 +-
 pkg/kubelet/config/config.go | 10 +-
 pkg/kubelet/configmap/configmap_manager.go | 16 +-
 pkg/kubelet/container/cache.go | 4 +-
 pkg/kubelet/cri/streaming/server_test.go | 1 -
 pkg/kubelet/eviction/helpers.go | 7 +-
 pkg/kubelet/kubelet.go | 57 ++--
 pkg/kubelet/kubelet_network_linux.go | 8 +-
 pkg/kubelet/kuberuntime/kuberuntime_gc.go | 8 +-
 pkg/kubelet/kuberuntime/logs/logs.go | 10 +-
 pkg/kubelet/lifecycle/predicate.go | 3 +-
 pkg/kubelet/pluginmanager/cache/types.go | 34 +-
 .../pluginmanager/reconciler/reconciler.go | 14 +-
 pkg/kubelet/pod_workers.go | 54 +--
 pkg/kubelet/reason_cache.go | 9 +-
 .../runtimeclass/testing/fake_manager.go | 3 +-
 pkg/kubelet/secret/secret_manager.go | 16 +-
 pkg/kubelet/server/auth.go | 7 +-
 pkg/kubelet/sysctl/util.go | 5 +-
 .../util/manager/cache_based_manager.go | 10 +-
 .../util/manager/watch_based_manager.go | 6 +-
 .../desired_state_of_world_populator.go | 8 +-
 .../volumemanager/reconciler/reconciler.go | 26 +-
 pkg/kubelet/volumemanager/volume_manager.go | 7 +-
 pkg/kubelet/winstats/version.go | 6 +-
 pkg/printers/tablegenerator.go | 4 +-
 pkg/probe/http/http.go | 6 +-
 pkg/proxy/endpoints.go | 2 +
 pkg/proxy/ipvs/proxier.go | 9 +-
 pkg/proxy/service.go | 15 +-
 pkg/proxy/winkernel/proxier.go | 8 +-
 pkg/quota/v1/evaluator/core/pods.go | 4 +-
 pkg/quota/v1/evaluator/core/services.go | 2 +-
 .../storageversion/strategy.go | 2 +-
 pkg/registry/apps/statefulset/strategy.go | 7 +-
 .../core/service/ipallocator/allocator.go | 22 +-
 .../portallocator/controller/repair.go | 4 +-
 .../core/service/portallocator/operation.go | 12 +-
 pkg/registry/core/service/strategy.go | 7 +-
 pkg/registry/flowcontrol/ensurer/strategy.go | 24 +-
 .../framework/plugins/helper/shape_score.go | 6 +-
 .../plugins/interpodaffinity/filtering.go | 2 +-
 .../framework/plugins/noderesources/fit.go | 31 +-
 .../plugins/volumebinding/assume_cache.go | 6 +-
 .../framework/plugins/volumebinding/binder.go | 40 +--
 .../framework/preemption/preemption.go | 22 +-
 pkg/scheduler/internal/cache/interface.go | 34 +-
 .../internal/queue/scheduling_queue.go | 10 +-
 pkg/scheduler/schedule_one_test.go | 8 +-
 pkg/scheduler/testing/workload_prep.go | 8 +-
 pkg/serviceaccount/jwt_test.go | 12 +-
 pkg/util/coverage/fake_test_deps.go | 1 +
 pkg/util/ipset/ipset.go | 20 +-
 pkg/util/ipvs/testing/fake.go | 22 +-
 pkg/volume/awsebs/aws_ebs_block.go | 3 +-
 pkg/volume/azuredd/azure_dd_block.go | 3 +-
 pkg/volume/cinder/cinder_block.go | 3 +-
 pkg/volume/csi/csi_mounter.go | 2 +-
 pkg/volume/csi/csi_plugin.go | 2 +-
 pkg/volume/fc/fc.go | 6 +-
 pkg/volume/flocker/flocker.go | 12 +-
 pkg/volume/glusterfs/glusterfs.go | 8 +-
 pkg/volume/glusterfs/glusterfs_minmax.go | 14 +-
 pkg/volume/iscsi/iscsi.go | 2 +-
 pkg/volume/plugins.go | 6 +-
 pkg/volume/portworx/portworx_util.go | 16 +-
 pkg/volume/rbd/attacher.go | 11 +-
 pkg/volume/testing/testing.go | 5 +-
 pkg/volume/util/atomic_writer.go | 64 ++--
 pkg/volume/util/device_util.go | 4 +-
 pkg/volume/util/device_util_linux.go | 9 +-
 .../util/hostutil/hostutil_unsupported.go | 2 +-
 pkg/volume/util/io_util.go | 2 +-
 .../util/recyclerclient/recycler_client.go | 6 +-
 pkg/volume/util/util.go | 2 +-
 .../volume_path_handler_linux.go | 6 +-
 plugin/pkg/admission/imagepolicy/admission.go | 42 +--
 .../admission/podnodeselector/admission.go | 7 +-
 .../token/bootstrap/bootstrap.go | 36 +-
 plugin/pkg/auth/authorizer/node/graph.go | 16 +-
 .../auth/authorizer/node/node_authorizer.go | 22 +-
 .../v1alpha1/generated.proto | 2 +-
 .../api/apiserverinternal/v1alpha1/types.go | 2 +-
 .../v1alpha1/types_swagger_doc_generated.go | 2 +-
 .../src/k8s.io/api/apps/v1/generated.proto | 5 +-
 staging/src/k8s.io/api/apps/v1/types.go | 5 +-
 .../apps/v1/types_swagger_doc_generated.go | 2 +-
 .../k8s.io/api/apps/v1beta1/generated.proto | 5 +-
 staging/src/k8s.io/api/apps/v1beta1/types.go | 5 +-
 .../v1beta1/types_swagger_doc_generated.go | 2 +-
 .../k8s.io/api/apps/v1beta2/generated.proto | 5 +-
 staging/src/k8s.io/api/apps/v1beta2/types.go | 5 +-
 .../v1beta2/types_swagger_doc_generated.go | 2 +-
 .../src/k8s.io/api/certificates/v1/types.go | 4 +-
 .../k8s.io/api/certificates/v1beta1/types.go | 3 +-
 .../src/k8s.io/api/core/v1/generated.proto | 42 ++-
 staging/src/k8s.io/api/core/v1/toleration.go | 9 +-
 staging/src/k8s.io/api/core/v1/types.go | 42 ++-
 .../core/v1/types_swagger_doc_generated.go | 6 +-
 .../api/flowcontrol/v1alpha1/generated.proto | 4 +-
 .../k8s.io/api/flowcontrol/v1alpha1/types.go | 4 +-
 .../v1alpha1/types_swagger_doc_generated.go | 2 +-
 .../api/flowcontrol/v1beta1/generated.proto | 4 +-
 .../k8s.io/api/flowcontrol/v1beta1/types.go | 4 +-
 .../v1beta1/types_swagger_doc_generated.go | 2 +-
 .../api/flowcontrol/v1beta2/generated.proto | 4 +-
 .../k8s.io/api/flowcontrol/v1beta2/types.go | 4 +-
 .../v1beta2/types_swagger_doc_generated.go | 2 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../pkg/apiserver/schema/cel/compilation.go | 7 +-
 .../pkg/apiserver/schema/cel/library/lists.go | 56 ++-
 .../pkg/apiserver/schema/cel/library/regex.go | 17 +-
 .../pkg/apiserver/schema/cel/library/urls.go | 88 ++---
 .../schema/defaulting/surroundingobject.go | 24 +-
 .../pkg/apiserver/schema/validation.go | 16 +-
 .../clientset/clientset/fake/register.go | 14 +-
 .../clientset/clientset/scheme/register.go | 14 +-
 .../clientset/deprecated/fake/register.go | 14 +-
 .../clientset/deprecated/scheme/register.go | 14 +-
 .../pkg/cmd/server/testing/testserver.go | 5 +-
 .../pkg/controller/openapi/builder/builder.go | 23 +-
 .../generated/openapi/zz_generated.openapi.go | 4 +-
 .../test/integration/conversion/webhook.go | 3 +-
 .../pkg/api/apitesting/roundtrip/roundtrip.go | 23 +-
 .../apimachinery/pkg/api/meta/conditions.go | 6 +-
 .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +-
 .../meta/testrestmapper/test_restmapper.go | 1 +
 .../pkg/api/resource/generated.proto | 12 +-
 .../apimachinery/pkg/api/resource/quantity.go | 20 +-
 .../pkg/apis/meta/v1/generated.proto | 21 +-
 .../apimachinery/pkg/apis/meta/v1/types.go | 30 +-
 .../apimachinery/pkg/labels/selector.go | 55 +--
 .../apimachinery/pkg/runtime/allocator.go | 10 +-
 .../k8s.io/apimachinery/pkg/runtime/codec.go | 13 +-
 .../apimachinery/pkg/runtime/generated.proto | 59 ++--
 .../pkg/runtime/schema/group_version.go | 6 +-
 .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +-
 .../pkg/runtime/serializer/codec_factory.go | 3 +-
 .../k8s.io/apimachinery/pkg/runtime/types.go | 59 ++--
 .../k8s.io/apimachinery/pkg/types/nodename.go | 24 +-
 .../apimachinery/pkg/util/framer/framer.go | 8 +-
 .../util/httpstream/spdy/roundtripper_test.go | 6 +-
 .../apimachinery/pkg/util/mergepatch/util.go | 3 +-
 .../apimachinery/pkg/util/net/port_split.go | 13 +-
 .../k8s.io/apimachinery/pkg/util/net/util.go | 1 +
 .../apimachinery/pkg/util/proxy/dial_test.go | 3 +-
 .../apimachinery/pkg/util/proxy/transport.go | 3 +-
 .../pkg/util/proxy/upgradeaware_test.go | 3 +-
 .../pkg/util/strategicpatch/patch_test.go | 1 -
 .../k8s.io/apimachinery/pkg/util/wait/wait.go | 12 +-
 .../k8s.io/apimachinery/pkg/watch/filter.go | 1 -
 .../pkg/admission/initializer/interfaces.go | 7 +-
 .../plugin/resourcequota/controller.go | 20 +-
 .../pkg/apis/example2/v1/generated.pb.go | 16 +-
 .../pkg/endpoints/deprecation/deprecation.go | 5 +-
 .../handlers/fieldmanager/scalehandler.go | 6 +-
 .../pkg/endpoints/request/context_test.go | 2 +-
 .../pkg/endpoints/responsewriter/wrapper.go | 16 +-
 .../pkg/registry/generic/registry/store.go | 14 +-
 .../pkg/registry/rest/resttest/resttest.go | 8 +-
 .../pkg/server/filters/maxinflight_test.go | 14 +-
 .../apiserver/pkg/server/genericapiserver.go | 71 ++--
 ...ericapiserver_graceful_termination_test.go | 183 +++++-----
 .../k8s.io/apiserver/pkg/server/handler.go | 2 +-
 .../apiserver/pkg/server/healthz/doc.go | 5 +-
 .../src/k8s.io/apiserver/pkg/server/hooks.go | 1 +
 .../apiserver/pkg/server/lifecycle_signals.go | 4 +-
 .../apiserver/pkg/server/options/admission.go | 16 +-
 .../pkg/server/options/authorization.go | 5 +-
 .../apiserver/pkg/storage/cacher/cacher.go | 3 +-
 .../pkg/storage/cacher/time_budget.go | 15 +-
 .../storage/cacher/watch_cache_interval.go | 4 +-
 .../pkg/storage/etcd3/latency_tracker.go | 3 +-
 .../etcd3/testing/testingcert/certificates.go | 15 +-
 .../storage/etcd3/testserver/test_server.go | 10 +-
 .../apiserver/pkg/storageversion/updater.go | 9 +-
 .../flowcontrol/fairqueuing/queueset/doc.go | 7 +-
 .../fairqueuing/queueset/queueset_test.go | 26 +-
 .../fairqueuing/testing/eventclock/fake.go | 3 +-
 .../request/list_work_estimator.go | 3 +-
 .../pkg/util/flowcontrol/watch_tracker.go | 5 +-
 .../pkg/util/webhook/serviceresolver.go | 3 +-
 .../apiserver/pkg/util/wsstream/conn.go | 18 +-
 .../x509metrics/server_cert_deprecations.go | 18 +-
 .../plugin/pkg/audit/buffered/buffered.go | 8 +-
 .../pkg/authenticator/token/oidc/oidc.go | 24 +-
 .../plugin/pkg/authorizer/webhook/webhook.go | 90 ++---
 .../pkg/genericclioptions/command_headers.go | 7 +-
 .../client-go/applyconfigurations/doc.go | 124 +++----
 .../client-go/kubernetes/fake/register.go | 14 +-
 .../client-go/kubernetes/scheme/register.go | 14 +-
 .../typed/events/v1beta1/event_expansion.go | 3 +-
 staging/src/k8s.io/client-go/rest/client.go | 14 +-
 staging/src/k8s.io/client-go/rest/plugin.go | 7 +-
 staging/src/k8s.io/client-go/rest/request.go | 22 +-
 staging/src/k8s.io/client-go/rest/warnings.go | 6 +-
 .../k8s.io/client-go/tools/auth/clientauth.go | 26 +-
 .../client-go/tools/cache/controller.go | 77 ++---
 .../client-go/tools/cache/delta_fifo.go | 38 +--
 .../client-go/tools/cache/expiration_cache.go | 15 +-
 .../src/k8s.io/client-go/tools/cache/fifo.go | 9 +-
 .../src/k8s.io/client-go/tools/cache/index.go | 8 +-
 .../client-go/tools/clientcmd/loader.go | 6 +-
 .../tools/leaderelection/leaderelection.go | 2 +-
 .../tools/portforward/portforward.go | 18 +-
 .../client-go/tools/record/events_cache.go | 14 +-
 .../src/k8s.io/client-go/tools/watch/until.go | 4 +-
 .../k8s.io/client-go/util/jsonpath/parser.go | 2 +-
 .../src/k8s.io/client-go/util/retry/util.go | 44 +--
 .../util/testing/fake_openapi_handler.go | 5 +-
 .../k8s.io/client-go/util/workqueue/doc.go | 14 +-
 staging/src/k8s.io/cloud-provider/app/core.go | 1 -
 .../cloud-provider/app/testing/testserver.go | 5 +-
 .../controllers/node/node_controller.go | 4 +-
 .../controllers/service/controller_test.go | 29 +-
 .../cloud-provider/node/helpers/address.go | 8 +-
 .../cmd/client-gen/generators/util/tags.go | 1 -
 .../code-generator/cmd/conversion-gen/main.go | 19 +-
 .../code-generator/cmd/deepcopy-gen/main.go | 9 +-
 .../code-generator/cmd/defaulter-gen/main.go | 6 +-
 .../cmd/go-to-protobuf/protobuf/parser.go | 3 +-
 .../cmd/prerelease-lifecycle-gen/main.go | 3 +-
 .../prerelease-lifecycle-generators/status.go | 3 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../internalversion/fake/register.go | 14 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../apiserver/openapi/zz_generated.openapi.go | 4 +-
 .../crd/clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../k8s.io/component-base/configz/configz.go | 33 +-
 .../src/k8s.io/component-base/logs/logs.go | 12 +-
 .../src/k8s.io/component-base/metrics/desc.go | 4 +-
 .../k8s.io/component-base/metrics/metric.go | 11 +-
 .../k8s.io/component-base/metrics/value.go | 3 +-
 .../src/k8s.io/component-base/traces/utils.go | 9 +-
 .../reconciliation/reconcile_rolebindings.go | 6 +-
 .../node/topology/helpers.go | 5 +-
 .../pkg/leadermigration/config/default.go | 3 +-
 .../pkg/leadermigration/migrator.go | 5 +-
 .../cri-api/pkg/apis/runtime/v1/api.pb.go | 62 ++--
 .../pkg/apis/runtime/v1alpha2/api.pb.go | 62 ++--
 .../csi-translation-lib/plugins/aws_ebs.go | 10 +-
 .../plugins/in_tree_volume.go | 12 +-
 .../clientset/fake/register.go | 14 +-
 .../clientset/scheme/register.go | 14 +-
 .../deprecated/fake/register.go | 14 +-
 .../deprecated/scheme/register.go | 14 +-
 .../autoregister/autoregister_controller.go | 3 +-
 .../generated/openapi/zz_generated.openapi.go | 4 +-
 .../kube-scheduler/extender/v1/types.go | 12 +-
 staging/src/k8s.io/kubectl/pkg/cmd/cmd.go | 16 +-
 .../src/k8s.io/kubectl/pkg/cmd/debug/debug.go | 5 +-
 .../kubectl/pkg/cmd/get/customcolumn.go | 16 +-
 .../kubectl/pkg/cmd/set/env/env_resolve.go | 14 +-
 .../k8s.io/kubectl/pkg/cmd/set/set_subject.go | 2 +-
 .../k8s.io/kubectl/pkg/describe/describe.go | 2 +-
 .../pkg/polymorphichelpers/rollback.go | 6 +-
 .../kubectl/pkg/util/deployment/deployment.go | 6 +-
 .../kubectl/pkg/util/term/term_writer.go | 8 +-
 staging/src/k8s.io/kubectl/pkg/util/util.go | 8 +-
 .../pkg/apis/deviceplugin/v1alpha/api.pb.go | 21 +-
 .../pkg/apis/deviceplugin/v1beta1/api.pb.go | 35 +-
 .../pkg/apis/pluginregistration/v1/api.pb.go | 18 +-
 .../k8s.io/legacy-cloud-providers/aws/aws.go | 14 +-
 .../aws/aws_loadbalancer.go | 3 +-
 .../legacy-cloud-providers/aws/instances.go | 6 +-
 .../legacy-cloud-providers/aws/volumes.go | 6 +-
 .../azure/azure_blobDiskController.go | 8 +-
 .../azure/azure_instances.go | 3 +-
 .../azure/azure_loadbalancer.go | 2 +-
 .../azure/azure_managedDiskController.go | 6 +-
 .../azure/azure_routes.go | 9 +-
 .../azure/azure_standard.go | 8 +-
 .../azure/azure_vmss.go | 8 +-
 .../azure/azure_wrap.go | 6 +-
 .../legacy-cloud-providers/gce/gce_util.go | 2 +-
 .../openstack/openstack_loadbalancer.go | 2 +-
 .../vsphere/nodemanager.go | 6 +-
 .../vsphere/vclib/utils.go | 4 +-
 .../vsphere/vsphere_test.go | 3 +-
 .../clientset/deprecated/fake/register.go | 14 +-
 .../clientset/deprecated/scheme/register.go | 14 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../client/custom_metrics/scheme/register.go | 14 +-
 staging/src/k8s.io/mount-utils/mount.go | 3 +-
 .../policy/check_seccompProfile_baseline.go | 2 -
 .../pod-security-admission/policy/checks.go | 9 +-
 .../test/fixtures_test.go | 2 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../generated/openapi/zz_generated.openapi.go | 4 +-
 .../clientset/versioned/fake/register.go | 14 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 test/conformance/doc.go | 10 +-
 test/e2e/apimachinery/namespace.go | 7 +-
 .../autoscaling/horizontal_pod_autoscaling.go | 1 -
 test/e2e/cloud/gcp/cluster_upgrade.go | 5 +-
 test/e2e/cloud/gcp/reboot.go | 12 +-
 test/e2e/common/storage/host_path.go | 10 +-
 .../autoscaling/autoscaling_utils.go | 4 +-
 test/e2e/framework/config/config.go | 36 +-
 test/e2e/framework/internal/output/output.go | 7 +-
 test/e2e/framework/log.go | 8 +-
 test/e2e/framework/network/utils.go | 63 ++--
 test/e2e/framework/providers/azure/azure.go | 2 +-
 test/e2e/framework/pv/pv.go | 34 +-
 test/e2e/framework/service/jig.go | 8 +-
 test/e2e/framework/test_context.go | 36 +-
 test/e2e/framework/util.go | 26 +-
 test/e2e/network/netpol/kubemanager.go | 4 +-
 test/e2e/network/networking_perf.go | 23 +-
 test/e2e/network/service_latency.go | 4 +-
 .../storage/framework/driver_operations.go | 4 +-
 test/e2e/storage/persistent_volumes.go | 6 +-
 test/e2e/storage/podlogs/podlogs.go | 4 +-
 test/e2e/storage/testsuites/volume_io.go | 3 +-
 test/e2e/storage/testsuites/volumemode.go | 5 +-
 test/e2e/storage/utils/create.go | 18 +-
 test/e2e/storage/utils/deployment.go | 18 +-
 .../e2e/storage/vsphere/pvc_label_selector.go | 27 +-
 test/e2e/storage/vsphere/vsphere_scale.go | 14 +-
 test/e2e/storage/vsphere/vsphere_stress.go | 14 +-
 test/e2e/storage/vsphere/vsphere_utils.go | 4 +-
 .../vsphere/vsphere_volume_cluster_ds.go | 14 +-
 .../vsphere/vsphere_volume_master_restart.go | 18 +-
 .../vsphere/vsphere_volume_node_poweroff.go | 6 +-
 .../storage/vsphere/vsphere_volume_perf.go | 4 +-
 .../vsphere/vsphere_volume_vpxd_restart.go | 24 +-
 test/e2e_node/eviction_test.go | 10 +-
 test/e2e_node/garbage_collector_test.go | 11 +-
 test/e2e_node/image_list.go | 2 +-
 test/e2e_node/memory_manager_test.go | 2 +-
test/e2e_node/runner/remote/run_remote.go | 13 +- test/images/agnhost/net/nat/closewait.go | 1 + test/images/agnhost/nettest/nettest.go | 2 +- test/images/apparmor-loader/loader.go | 9 +- .../admissionwebhook/admission_test.go | 3 +- .../apiserver/flowcontrol/fight_test.go | 32 +- test/integration/quota/quota_test.go | 11 +- .../scheduler/plugins/plugins_test.go | 7 +- .../scheduler_perf/scheduler_perf_test.go | 11 +- .../volume/persistent_volumes_test.go | 18 +- test/utils/admission_webhook.go | 3 +- test/utils/harness/harness.go | 12 +- 451 files changed, 3281 insertions(+), 2918 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b5e0608c64a..c0ecd317094 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1217,7 +1217,7 @@ "type": "object" }, "io.k8s.api.apps.v1.StatefulSet": { - "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -5247,7 +5247,7 @@ "x-kubernetes-map-type": "atomic" }, "io.k8s.api.core.v1.EndpointSubset": { - "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", "properties": { "addresses": { "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", @@ -5274,7 +5274,7 @@ "type": "object" }, "io.k8s.api.core.v1.Endpoints": { - "description": "Endpoints is a collection of endpoints that implement the actual service. 
Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -7631,7 +7631,7 @@ "type": "object" }, "io.k8s.api.core.v1.PodIP": { - "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", + "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.", "properties": { "ip": { "description": "ip is an IP address (IPv4 or IPv6) assigned to the pod", @@ -10521,7 +10521,7 @@ ] }, "io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": { - "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", @@ -11070,7 +11070,7 @@ ] }, "io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": { - "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. 
It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", @@ -14062,7 +14062,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "type": "string" }, "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": { @@ -15292,7 +15292,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/api__v1_openapi.json b/api/openapi-spec/v3/api__v1_openapi.json index f2b8cd2e860..826d871c984 100644 --- a/api/openapi-spec/v3/api__v1_openapi.json +++ b/api/openapi-spec/v3/api__v1_openapi.json @@ -1621,7 +1621,7 @@ "x-kubernetes-map-type": "atomic" }, "io.k8s.api.core.v1.EndpointSubset": { - "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", "properties": { "addresses": { "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", @@ -1663,7 +1663,7 @@ "type": "object" }, "io.k8s.api.core.v1.Endpoints": { - "description": "Endpoints is a collection of endpoints that implement the actual service. 
Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -4786,7 +4786,7 @@ "type": "object" }, "io.k8s.api.core.v1.PodIP": { - "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", + "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.", "properties": { "ip": { "description": "ip is an IP address (IPv4 or IPv6) assigned to the pod", @@ -7779,7 +7779,7 @@ ] }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -8953,7 +8953,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. 
The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json index 780e1a6d540..90dae91eec3 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json @@ -1547,7 +1547,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. 
The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json index 4d2936c1365..de8278d2897 100644 --- a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json @@ -1879,7 +1879,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__apps__v1_openapi.json b/api/openapi-spec/v3/apis__apps__v1_openapi.json index 400a9982ca5..80edf141614 100644 --- a/api/openapi-spec/v3/apis__apps__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apps__v1_openapi.json @@ -915,7 +915,7 @@ "type": "object" }, "io.k8s.api.apps.v1.StatefulSet": { - "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -4824,7 +4824,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -5946,7 +5946,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json index 009e904999c..6b17d202ab4 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json @@ -1249,7 +1249,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json index cab884ed01f..2bfa3dc9613 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json @@ -837,7 +837,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -1959,7 +1959,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json index 8d7b6be9f77..0e887dfabcf 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json @@ -828,7 +828,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -1950,7 +1950,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. 
The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index d4fd0a34b59..499c25bb271 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -3903,7 +3903,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -5025,7 +5025,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json index f0d52773cc2..6a5ca8a0e88 100644 --- a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json @@ -1287,7 +1287,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json index d06d3a8244c..482e5d2fecd 100644 --- a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json @@ -1181,7 +1181,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json index cad67f2f223..8bad7a91438 100644 --- a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json @@ -1339,7 +1339,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json index f0731551d0c..0a564867cb8 100644 --- a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json @@ -1303,7 +1303,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json index 519a0ffd708..d6da58f1bc4 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json @@ -251,7 +251,7 @@ ] }, "io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": { - "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. 
It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { "default": 0, @@ -1749,7 +1749,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json index 3123914c56d..2167450fe2c 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json @@ -251,7 +251,7 @@ ] }, "io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": { - "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { "default": 0, @@ -1749,7 +1749,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json index aaeb71534d7..47e6944abbb 100644 --- a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json @@ -1266,7 +1266,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json index a3025a2448b..1344dbe2775 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json @@ -1983,7 +1983,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json index 6e50a05fd20..6540662e68f 100644 --- a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json @@ -174,7 +174,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -1239,7 +1239,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. 
The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__policy__v1_openapi.json b/api/openapi-spec/v3/apis__policy__v1_openapi.json index 777ab72dba0..b14a27baac0 100644 --- a/api/openapi-spec/v3/apis__policy__v1_openapi.json +++ b/api/openapi-spec/v3/apis__policy__v1_openapi.json @@ -1355,7 +1355,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
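Stepping outside the generated hunks for a moment: the RawExtension description repeated in these schemas is effectively an API how-to. Below is a minimal, self-contained sketch of the two-step decode it describes. It uses plain encoding/json and explicit Kind/APIVersion fields instead of the inline runtime.TypeMeta from the doc's example, and the second unmarshal stands in for the scheme conversion the description talks about, so treat it as an illustration of the idea rather than the real conversion path.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// External-style type mirroring the doc's example: the plugin payload stays
// raw until something that knows the concrete plugin types unpacks it.
type MyAPIObject struct {
	Kind       string               `json:"kind"`
	APIVersion string               `json:"apiVersion"`
	MyPlugin   runtime.RawExtension `json:"myPlugin"`
}

type PluginA struct {
	Kind    string `json:"kind"`
	AOption string `json:"aOption"`
}

func main() {
	wire := []byte(`{"kind":"MyAPIObject","apiVersion":"v1","myPlugin":{"kind":"PluginA","aOption":"foo"}}`)

	// Step 1: a plain JSON decode. RawExtension only captures the raw bytes;
	// nothing is unpacked yet.
	var obj MyAPIObject
	if err := json.Unmarshal(wire, &obj); err != nil {
		panic(err)
	}
	fmt.Println(string(obj.MyPlugin.Raw)) // the still-packed plugin JSON

	// Step 2: something that knows PluginA (in Kubernetes, the scheme's
	// conversion functions) unpacks the raw bytes into the concrete type.
	var plugin PluginA
	if err := json.Unmarshal(obj.MyPlugin.Raw, &plugin); err != nil {
		panic(err)
	}
	fmt.Println(plugin.AOption) // "foo"
}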
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" }, "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { diff --git a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json index 701ecac3616..5e201854e6a 100644 --- a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json @@ -1616,7 +1616,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json index 7f5525bc4dd..1aee1ac65cf 100644 --- a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json @@ -1152,7 +1152,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json index bc9baf68191..a7630077dd2 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json @@ -1921,7 +1921,7 @@ "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -3043,7 +3043,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json index 4fa4524079a..4ccfde61f3c 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json @@ -113,7 +113,7 @@ ] }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "oneOf": [ { "type": "string" @@ -1235,7 +1235,7 @@ ] }, "io.k8s.apimachinery.pkg.runtime.RawExtension": { - "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "type": "object" } }, diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go index e92fff9b7f0..01df67130d3 100644 --- a/cmd/kube-apiserver/app/testing/testserver.go +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -94,8 +94,9 @@ func NewDefaultTestServerOptions() *TestServerInstanceOptions { // and location of the tmpdir are returned. // // Note: we return a tear-down func instead of a stop channel because the later will leak temporary -// files that because Golang testing's call to os.Exit will not give a stop channel go routine -// enough time to remove temporary files. +// +// files that because Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) { if instanceOptions == nil { instanceOptions = NewDefaultTestServerOptions() diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index 3b074a6acdc..32f84caecd3 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go index 21e2375b1eb..90cf272b4d0 100644 --- a/cmd/kube-controller-manager/app/autoscaling.go +++ b/cmd/kube-controller-manager/app/autoscaling.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go index 6ee175659d1..e51eb8fdd6a 100644 --- a/cmd/kube-controller-manager/app/batch.go +++ b/cmd/kube-controller-manager/app/batch.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go index 8508f003e92..e1b5641f6e5 100644 --- a/cmd/kube-controller-manager/app/certificates.go +++ b/cmd/kube-controller-manager/app/certificates.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 96a90af4462..32e48623a34 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. 
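For context on the StartTestServer comment reflowed in the testserver.go hunk above: the tear-down function it mentions (rather than a stop channel) is normally deferred from an integration test so temporary files are removed before the test binary exits. The sketch below is an assumed usage pattern, not part of this patch; StartTestServerOrDie, the TearDownFn and ClientConfig fields, and framework.SharedEtcd are used on the assumption that they exist with these shapes.

package apiserver_test

import (
	"testing"

	"k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/framework"
)

func TestSomethingAgainstRealAPIServer(t *testing.T) {
	// Assumed usage: deferring the returned tear-down func gives it a chance
	// to clean up before os.Exit, which a stop-channel goroutine would not get.
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer server.TearDownFn()

	client, err := kubernetes.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatal(err)
	}
	_ = client // exercise the temporary control plane through the client here
}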
-// package app import ( @@ -381,7 +380,8 @@ func (c ControllerContext) IsControllerEnabled(name string) bool { type InitFunc func(ctx context.Context, controllerCtx ControllerContext) (controller controller.Interface, enabled bool, err error) // ControllerInitializersFunc is used to create a collection of initializers -// given the loopMode. +// +// given the loopMode. type ControllerInitializersFunc func(loopMode ControllerLoopMode) (initializers map[string]InitFunc) var _ ControllerInitializersFunc = NewControllerInitializers @@ -727,7 +727,8 @@ func leaderElectAndRun(c *config.CompletedConfig, lockIdentity string, electionC } // createInitializersFunc creates a initializersFunc that returns all initializer -// with expected as the result after filtering through filterFunc. +// +// with expected as the result after filtering through filterFunc. func createInitializersFunc(filterFunc leadermigration.FilterFunc, expected leadermigration.FilterResult) ControllerInitializersFunc { return func(loopMode ControllerLoopMode) map[string]InitFunc { initializers := make(map[string]InitFunc) diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index c6e6792c912..cb692299647 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/discovery.go b/cmd/kube-controller-manager/app/discovery.go index 52064c50c61..ffc1af95728 100644 --- a/cmd/kube-controller-manager/app/discovery.go +++ b/cmd/kube-controller-manager/app/discovery.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index b3d2e7e2da5..0a833bd6ba3 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -15,7 +15,6 @@ limitations under the License. */ // Package options provides the flags used for the controller manager. -// package options import ( diff --git a/cmd/kube-controller-manager/app/policy.go b/cmd/kube-controller-manager/app/policy.go index cbf5f272f41..8c66cbf2a2f 100644 --- a/cmd/kube-controller-manager/app/policy.go +++ b/cmd/kube-controller-manager/app/policy.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes replication controllers, service endpoints and // nodes. -// package app import ( diff --git a/cmd/kube-controller-manager/app/testing/testserver.go b/cmd/kube-controller-manager/app/testing/testserver.go index 595984fac2d..148b9c9953d 100644 --- a/cmd/kube-controller-manager/app/testing/testserver.go +++ b/cmd/kube-controller-manager/app/testing/testserver.go @@ -58,8 +58,9 @@ type Logger interface { // and location of the tmpdir are returned. // // Note: we return a tear-down func instead of a stop channel because the later will leak temporary -// files that because Golang testing's call to os.Exit will not give a stop channel go routine -// enough time to remove temporary files. 
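The createInitializersFunc comment reflowed in the controllermanager.go hunk above describes a filter-then-collect pattern over the initializer map. A generic sketch of that pattern follows; the type and function names are illustrative stand-ins, not the kube-controller-manager or leadermigration types.

package main

import "fmt"

// InitFunc stands in for a controller constructor.
type InitFunc func() error

// FilterResult and FilterFunc stand in for the leader-migration filter types
// referenced in the hunk above; the names here are illustrative only.
type FilterResult bool
type FilterFunc func(name string) FilterResult

// filteredInitializers returns only the initializers whose filter result
// matches the expected value, mirroring the pattern the comment describes.
func filteredInitializers(all map[string]InitFunc, filter FilterFunc, expected FilterResult) map[string]InitFunc {
	out := make(map[string]InitFunc)
	for name, init := range all {
		if filter(name) == expected {
			out[name] = init
		}
	}
	return out
}

func main() {
	all := map[string]InitFunc{
		"deployment": func() error { return nil },
		"cronjob":    func() error { return nil },
	}
	migrated := func(name string) FilterResult { return FilterResult(name == "cronjob") }
	kept := filteredInitializers(all, migrated, FilterResult(false))
	fmt.Println(len(kept)) // 1: only "deployment" stays in the main loop
}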
+// +// files that because Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) { stopCh := make(chan struct{}) var errCh chan error diff --git a/cmd/kube-scheduler/app/testing/testserver.go b/cmd/kube-scheduler/app/testing/testserver.go index e76b2204b23..819c7a2a7b9 100644 --- a/cmd/kube-scheduler/app/testing/testserver.go +++ b/cmd/kube-scheduler/app/testing/testserver.go @@ -59,8 +59,9 @@ type Logger interface { // and location of the tmpdir are returned. // // Note: we return a tear-down func instead of a stop channel because the later will leak temporary -// files that because Golang testing's call to os.Exit will not give a stop channel go routine -// enough time to remove temporary files. +// +// files that because Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta2/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1beta2/doc.go index 1e040793a7f..92b5f7b9153 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta2/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta2/doc.go @@ -25,22 +25,23 @@ limitations under the License. // This version improves on the v1beta1 format by fixing some minor issues and adding a few new fields. // // A list of changes since v1beta1: -// - "certificateKey" field is added to InitConfiguration and JoinConfiguration. -// - "ignorePreflightErrors" field is added to the NodeRegistrationOptions. -// - The JSON "omitempty" tag is used in a more places where appropriate. -// - The JSON "omitempty" tag of the "taints" field (inside NodeRegistrationOptions) is removed. -// See the Kubernetes 1.15 changelog for further details. +// - "certificateKey" field is added to InitConfiguration and JoinConfiguration. +// - "ignorePreflightErrors" field is added to the NodeRegistrationOptions. +// - The JSON "omitempty" tag is used in a more places where appropriate. +// - The JSON "omitempty" tag of the "taints" field (inside NodeRegistrationOptions) is removed. +// See the Kubernetes 1.15 changelog for further details. // -// Migration from old kubeadm config versions +// # Migration from old kubeadm config versions // // Please convert your v1beta1 configuration files to v1beta2 using the "kubeadm config migrate" command of kubeadm v1.15.x // (conversion from older releases of kubeadm config files requires older release of kubeadm as well e.g. +// // kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3; // kubeadm v1.13 or v1.14 should be used to translate v1alpha3 to v1beta1) // // Nevertheless, kubeadm v1.15.x will support reading from v1beta1 version of the kubeadm config file format. // -// Basics +// # Basics // // The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the // configuration options defined in the kubeadm config file are also available as command line flags, but only @@ -50,24 +51,25 @@ limitations under the License. 
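Most of the churn in the doc.go and testserver.go hunks in this patch comes from the doc-comment conventions gofmt enforces from Go 1.19 onward: "# " headings, tab-indented code blocks, list items with a hanging indent, and continuation text pushed onto its own "//" paragraph. A small illustration, not taken from the patched files and assuming a Go 1.19+ toolchain, is:

// Package example illustrates how gofmt in Go 1.19+ lays out doc comments.
//
// # Usage
//
// Indented lines render as a code block in godoc:
//
//	example --config config.yaml
//
// List items keep their own lines and a hanging indent:
//   - first item, possibly wrapped onto
//     a continuation line
//   - second item
package example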
// // kubeadm supports the following configuration types: // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: InitConfiguration +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: InitConfiguration // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: ClusterConfiguration +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: ClusterConfiguration // -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration // -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: JoinConfiguration +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: JoinConfiguration // // To print the defaults for "init" and "join" actions use the following commands: -// kubeadm config print init-defaults -// kubeadm config print join-defaults +// +// kubeadm config print init-defaults +// kubeadm config print join-defaults // // The list of configuration types that must be included in a configuration file depends by the action you are // performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). @@ -82,18 +84,18 @@ limitations under the License. // If the user provides a configuration types that is not expected for the action you are performing, kubeadm will // ignore those types and print a warning. // -// Kubeadm init configuration types +// # Kubeadm init configuration types // // When executing kubeadm init with the --config option, the following configuration types could be used: // InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one // between InitConfiguration and ClusterConfiguration is mandatory. // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: InitConfiguration -// bootstrapTokens: -// ... -// nodeRegistration: -// ... +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: InitConfiguration +// bootstrapTokens: +// ... +// nodeRegistration: +// ... // // The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init // are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm @@ -106,18 +108,18 @@ limitations under the License. // - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; // use it e.g. to customize the API server advertise address. // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: ClusterConfiguration -// networking: -// ... -// etcd: -// ... -// apiServer: -// extraArgs: -// ... -// extraVolumes: -// ... -// ... +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: ClusterConfiguration +// networking: +// ... +// etcd: +// ... +// apiServer: +// extraArgs: +// ... +// extraVolumes: +// ... +// ... // // The ClusterConfiguration type should be used to configure cluster-wide settings, // including settings for: @@ -131,9 +133,9 @@ limitations under the License. // - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane // components by adding customized setting or overriding kubeadm default settings. // -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration -// ... +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// ... 
// // The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed // in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. @@ -141,9 +143,9 @@ limitations under the License. // See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration // for kube proxy official documentation. // -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration -// ... +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// ... // // The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances // deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. @@ -154,113 +156,113 @@ limitations under the License. // Here is a fully populated example of a single YAML file containing multiple // configuration types to be used during a `kubeadm init` run. // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: InitConfiguration -// bootstrapTokens: -// - token: "9a08jv.c0izixklcxtmnze7" -// description: "kubeadm bootstrap token" -// ttl: "24h" -// - token: "783bde.3f89s0fje9f38fhf" -// description: "another bootstrap token" -// usages: -// - authentication -// - signing -// groups: -// - system:bootstrappers:kubeadm:default-node-token -// nodeRegistration: -// name: "ec2-10-100-0-1" -// criSocket: "unix:///var/run/containerd/containerd.sock" -// taints: -// - key: "kubeadmNode" -// value: "someValue" -// effect: "NoSchedule" -// kubeletExtraArgs: -// v: 4 -// ignorePreflightErrors: -// - IsPrivilegedUser -// localAPIEndpoint: -// advertiseAddress: "10.100.0.1" -// bindPort: 6443 -// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" -// --- -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: ClusterConfiguration -// etcd: -// # one of local or external -// local: -// imageRepository: "registry.k8s.io" -// imageTag: "3.2.24" -// dataDir: "/var/lib/etcd" -// extraArgs: -// listen-client-urls: "http://10.100.0.1:2379" -// serverCertSANs: -// - "ec2-10-100-0-1.compute-1.amazonaws.com" -// peerCertSANs: -// - "10.100.0.1" -// # external: -// # endpoints: -// # - "10.100.0.1:2379" -// # - "10.100.0.2:2379" -// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" -// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" -// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" -// networking: -// serviceSubnet: "10.96.0.0/16" -// podSubnet: "10.244.0.0/24" -// dnsDomain: "cluster.local" -// kubernetesVersion: "v1.12.0" -// controlPlaneEndpoint: "10.100.0.1:6443" -// apiServer: -// extraArgs: -// authorization-mode: "Node,RBAC" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certSANs: -// - "10.100.1.1" -// - "ec2-10-100-0-1.compute-1.amazonaws.com" -// timeoutForControlPlane: 4m0s -// controllerManager: -// extraArgs: -// "node-cidr-mask-size": "20" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// scheduler: -// extraArgs: -// address: "10.100.0.1" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certificatesDir: "/etc/kubernetes/pki" -// imageRepository: 
"registry.k8s.io" -// useHyperKubeImage: false -// clusterName: "example-cluster" -// --- -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration -// # kubelet specific options here -// --- -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration -// # kube-proxy specific options here +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: InitConfiguration +// bootstrapTokens: +// - token: "9a08jv.c0izixklcxtmnze7" +// description: "kubeadm bootstrap token" +// ttl: "24h" +// - token: "783bde.3f89s0fje9f38fhf" +// description: "another bootstrap token" +// usages: +// - authentication +// - signing +// groups: +// - system:bootstrappers:kubeadm:default-node-token +// nodeRegistration: +// name: "ec2-10-100-0-1" +// criSocket: "unix:///var/run/containerd/containerd.sock" +// taints: +// - key: "kubeadmNode" +// value: "someValue" +// effect: "NoSchedule" +// kubeletExtraArgs: +// v: 4 +// ignorePreflightErrors: +// - IsPrivilegedUser +// localAPIEndpoint: +// advertiseAddress: "10.100.0.1" +// bindPort: 6443 +// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" +// --- +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: ClusterConfiguration +// etcd: +// # one of local or external +// local: +// imageRepository: "registry.k8s.io" +// imageTag: "3.2.24" +// dataDir: "/var/lib/etcd" +// extraArgs: +// listen-client-urls: "http://10.100.0.1:2379" +// serverCertSANs: +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// peerCertSANs: +// - "10.100.0.1" +// # external: +// # endpoints: +// # - "10.100.0.1:2379" +// # - "10.100.0.2:2379" +// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" +// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" +// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" +// networking: +// serviceSubnet: "10.96.0.0/16" +// podSubnet: "10.244.0.0/24" +// dnsDomain: "cluster.local" +// kubernetesVersion: "v1.12.0" +// controlPlaneEndpoint: "10.100.0.1:6443" +// apiServer: +// extraArgs: +// authorization-mode: "Node,RBAC" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certSANs: +// - "10.100.1.1" +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// timeoutForControlPlane: 4m0s +// controllerManager: +// extraArgs: +// "node-cidr-mask-size": "20" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// scheduler: +// extraArgs: +// address: "10.100.0.1" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certificatesDir: "/etc/kubernetes/pki" +// imageRepository: "registry.k8s.io" +// useHyperKubeImage: false +// clusterName: "example-cluster" +// --- +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// # kubelet specific options here +// --- +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// # kube-proxy specific options here // -// Kubeadm join configuration types +// # Kubeadm join configuration types // // When executing kubeadm join with the --config option, the JoinConfiguration type should be provided. // -// apiVersion: kubeadm.k8s.io/v1beta2 -// kind: JoinConfiguration -// ... +// apiVersion: kubeadm.k8s.io/v1beta2 +// kind: JoinConfiguration +// ... 
// // The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join // are the discovery method used for accessing the cluster info and all the setting which are specific @@ -271,7 +273,6 @@ limitations under the License. // node only (e.g. the node ip). // // - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node. -// package v1beta2 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" //TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta3/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1beta3/doc.go index 63940262f02..4922c4c9c65 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta3/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta3/doc.go @@ -23,28 +23,28 @@ limitations under the License. // This version improves on the v1beta2 format by fixing some minor issues and adding a few new fields. // // A list of changes since v1beta2: -// - The deprecated "ClusterConfiguration.useHyperKubeImage" field has been removed. -// Kubeadm no longer supports the hyperkube image. -// - The "ClusterConfiguration.DNS.Type" field has been removed since CoreDNS is the only supported -// DNS server type by kubeadm. -// - Include "datapolicy" tags on the fields that hold secrets. -// This would result in the field values to be omitted when API structures are printed with klog. -// - Add "InitConfiguration.SkipPhases", "JoinConfiguration.SkipPhases" to allow skipping -// a list of phases during kubeadm init/join command execution. -// - Add "InitConfiguration.NodeRegistration.ImagePullPolicy" and "JoinConfiguration.NodeRegistration.ImagePullPolicy" -// to allow specifying the images pull policy during kubeadm "init" and "join". The value must be one of "Always", "Never" or -// "IfNotPresent". "IfNotPresent" is the default, which has been the existing behavior prior to this addition. -// - Add "InitConfiguration.Patches.Directory", "JoinConfiguration.Patches.Directory" to allow -// the user to configure a directory from which to take patches for components deployed by kubeadm. -// - Move the BootstrapToken* API and related utilities out of the "kubeadm" API group to a new group -// "bootstraptoken". The kubeadm API version v1beta3 no longer contains the BootstrapToken* structures. +// - The deprecated "ClusterConfiguration.useHyperKubeImage" field has been removed. +// Kubeadm no longer supports the hyperkube image. +// - The "ClusterConfiguration.DNS.Type" field has been removed since CoreDNS is the only supported +// DNS server type by kubeadm. +// - Include "datapolicy" tags on the fields that hold secrets. +// This would result in the field values to be omitted when API structures are printed with klog. +// - Add "InitConfiguration.SkipPhases", "JoinConfiguration.SkipPhases" to allow skipping +// a list of phases during kubeadm init/join command execution. +// - Add "InitConfiguration.NodeRegistration.ImagePullPolicy" and "JoinConfiguration.NodeRegistration.ImagePullPolicy" +// to allow specifying the images pull policy during kubeadm "init" and "join". The value must be one of "Always", "Never" or +// "IfNotPresent". "IfNotPresent" is the default, which has been the existing behavior prior to this addition. +// - Add "InitConfiguration.Patches.Directory", "JoinConfiguration.Patches.Directory" to allow +// the user to configure a directory from which to take patches for components deployed by kubeadm. 
+// - Move the BootstrapToken* API and related utilities out of the "kubeadm" API group to a new group +// "bootstraptoken". The kubeadm API version v1beta3 no longer contains the BootstrapToken* structures. // // Migration from old kubeadm config versions // -// - kubeadm v1.15.x and newer can be used to migrate from v1beta1 to v1beta2. -// - kubeadm v1.22.x and newer no longer support v1beta1 and older APIs, but can be used to migrate v1beta2 to v1beta3. +// - kubeadm v1.15.x and newer can be used to migrate from v1beta1 to v1beta2. +// - kubeadm v1.22.x and newer no longer support v1beta1 and older APIs, but can be used to migrate v1beta2 to v1beta3. // -// Basics +// # Basics // // The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the // configuration options defined in the kubeadm config file are also available as command line flags, but only @@ -54,24 +54,25 @@ limitations under the License. // // kubeadm supports the following configuration types: // -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: InitConfiguration +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: InitConfiguration // -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: ClusterConfiguration +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: ClusterConfiguration // -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration // -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration // -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: JoinConfiguration +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: JoinConfiguration // // To print the defaults for "init" and "join" actions use the following commands: -// kubeadm config print init-defaults -// kubeadm config print join-defaults +// +// kubeadm config print init-defaults +// kubeadm config print join-defaults // // The list of configuration types that must be included in a configuration file depends by the action you are // performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). @@ -86,18 +87,18 @@ limitations under the License. // If the user provides a configuration types that is not expected for the action you are performing, kubeadm will // ignore those types and print a warning. // -// Kubeadm init configuration types +// # Kubeadm init configuration types // // When executing kubeadm init with the --config option, the following configuration types could be used: // InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one // between InitConfiguration and ClusterConfiguration is mandatory. // -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: InitConfiguration -// bootstrapTokens: -// ... -// nodeRegistration: -// ... +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: InitConfiguration +// bootstrapTokens: +// ... +// nodeRegistration: +// ... // // The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init // are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm @@ -110,18 +111,18 @@ limitations under the License. // - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; // use it e.g. to customize the API server advertise address. 
// -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: ClusterConfiguration -// networking: -// ... -// etcd: -// ... -// apiServer: -// extraArgs: -// ... -// extraVolumes: -// ... -// ... +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: ClusterConfiguration +// networking: +// ... +// etcd: +// ... +// apiServer: +// extraArgs: +// ... +// extraVolumes: +// ... +// ... // // The ClusterConfiguration type should be used to configure cluster-wide settings, // including settings for: @@ -135,9 +136,9 @@ limitations under the License. // - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane // components by adding customized setting or overriding kubeadm default settings. // -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration -// ... +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// ... // // The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed // in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. @@ -145,9 +146,9 @@ limitations under the License. // See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration // for kube proxy official documentation. // -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration -// ... +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// ... // // The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances // deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. @@ -158,115 +159,115 @@ limitations under the License. // Here is a fully populated example of a single YAML file containing multiple // configuration types to be used during a `kubeadm init` run. 
// -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: InitConfiguration -// bootstrapTokens: -// - token: "9a08jv.c0izixklcxtmnze7" -// description: "kubeadm bootstrap token" -// ttl: "24h" -// - token: "783bde.3f89s0fje9f38fhf" -// description: "another bootstrap token" -// usages: -// - authentication -// - signing -// groups: -// - system:bootstrappers:kubeadm:default-node-token -// nodeRegistration: -// name: "ec2-10-100-0-1" -// criSocket: "unix:///var/run/containerd/containerd.sock" -// taints: -// - key: "kubeadmNode" -// value: "someValue" -// effect: "NoSchedule" -// kubeletExtraArgs: -// v: 4 -// ignorePreflightErrors: -// - IsPrivilegedUser -// imagePullPolicy: "IfNotPresent" -// localAPIEndpoint: -// advertiseAddress: "10.100.0.1" -// bindPort: 6443 -// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" -// skipPhases: -// - addon/kube-proxy -// --- -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: ClusterConfiguration -// etcd: -// # one of local or external -// local: -// imageRepository: "registry.k8s.io" -// imageTag: "3.2.24" -// dataDir: "/var/lib/etcd" -// extraArgs: -// listen-client-urls: "http://10.100.0.1:2379" -// serverCertSANs: -// - "ec2-10-100-0-1.compute-1.amazonaws.com" -// peerCertSANs: -// - "10.100.0.1" -// # external: -// # endpoints: -// # - "10.100.0.1:2379" -// # - "10.100.0.2:2379" -// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" -// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" -// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" -// networking: -// serviceSubnet: "10.96.0.0/16" -// podSubnet: "10.244.0.0/24" -// dnsDomain: "cluster.local" -// kubernetesVersion: "v1.21.0" -// controlPlaneEndpoint: "10.100.0.1:6443" -// apiServer: -// extraArgs: -// authorization-mode: "Node,RBAC" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certSANs: -// - "10.100.1.1" -// - "ec2-10-100-0-1.compute-1.amazonaws.com" -// timeoutForControlPlane: 4m0s -// controllerManager: -// extraArgs: -// "node-cidr-mask-size": "20" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// scheduler: -// extraArgs: -// address: "10.100.0.1" -// extraVolumes: -// - name: "some-volume" -// hostPath: "/etc/some-path" -// mountPath: "/etc/some-pod-path" -// readOnly: false -// pathType: File -// certificatesDir: "/etc/kubernetes/pki" -// imageRepository: "registry.k8s.io" -// clusterName: "example-cluster" -// --- -// apiVersion: kubelet.config.k8s.io/v1beta1 -// kind: KubeletConfiguration -// # kubelet specific options here -// --- -// apiVersion: kubeproxy.config.k8s.io/v1alpha1 -// kind: KubeProxyConfiguration -// # kube-proxy specific options here +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: InitConfiguration +// bootstrapTokens: +// - token: "9a08jv.c0izixklcxtmnze7" +// description: "kubeadm bootstrap token" +// ttl: "24h" +// - token: "783bde.3f89s0fje9f38fhf" +// description: "another bootstrap token" +// usages: +// - authentication +// - signing +// groups: +// - system:bootstrappers:kubeadm:default-node-token +// nodeRegistration: +// name: "ec2-10-100-0-1" +// criSocket: "unix:///var/run/containerd/containerd.sock" +// taints: +// - key: "kubeadmNode" +// value: "someValue" +// effect: "NoSchedule" +// kubeletExtraArgs: +// v: 4 +// ignorePreflightErrors: +// - IsPrivilegedUser +// imagePullPolicy: "IfNotPresent" +// localAPIEndpoint: 
+// advertiseAddress: "10.100.0.1" +// bindPort: 6443 +// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" +// skipPhases: +// - addon/kube-proxy +// --- +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: ClusterConfiguration +// etcd: +// # one of local or external +// local: +// imageRepository: "registry.k8s.io" +// imageTag: "3.2.24" +// dataDir: "/var/lib/etcd" +// extraArgs: +// listen-client-urls: "http://10.100.0.1:2379" +// serverCertSANs: +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// peerCertSANs: +// - "10.100.0.1" +// # external: +// # endpoints: +// # - "10.100.0.1:2379" +// # - "10.100.0.2:2379" +// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" +// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" +// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" +// networking: +// serviceSubnet: "10.96.0.0/16" +// podSubnet: "10.244.0.0/24" +// dnsDomain: "cluster.local" +// kubernetesVersion: "v1.21.0" +// controlPlaneEndpoint: "10.100.0.1:6443" +// apiServer: +// extraArgs: +// authorization-mode: "Node,RBAC" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certSANs: +// - "10.100.1.1" +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// timeoutForControlPlane: 4m0s +// controllerManager: +// extraArgs: +// "node-cidr-mask-size": "20" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// scheduler: +// extraArgs: +// address: "10.100.0.1" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certificatesDir: "/etc/kubernetes/pki" +// imageRepository: "registry.k8s.io" +// clusterName: "example-cluster" +// --- +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// # kubelet specific options here +// --- +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// # kube-proxy specific options here // -// Kubeadm join configuration types +// # Kubeadm join configuration types // // When executing kubeadm join with the --config option, the JoinConfiguration type should be provided. // -// apiVersion: kubeadm.k8s.io/v1beta3 -// kind: JoinConfiguration -// ... +// apiVersion: kubeadm.k8s.io/v1beta3 +// kind: JoinConfiguration +// ... // // The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join // are the discovery method used for accessing the cluster info and all the setting which are specific @@ -277,7 +278,6 @@ limitations under the License. // node only (e.g. the node ip). // // - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node. 
-// package v1beta3 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" //TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 6e38c79873d..3fe3c2e3875 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -424,7 +424,7 @@ func TestValidateAPIEndpoint(t *testing.T) { } } -//TODO: Create a separated test for ValidateClusterConfiguration +// TODO: Create a separated test for ValidateClusterConfiguration func TestValidateInitConfiguration(t *testing.T) { nodename := "valid-nodename" var tests = []struct { diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 89c46c672bd..0bae95c2d47 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -131,7 +131,8 @@ type initData struct { // newCmdInit returns "kubeadm init" command. // NB. initOptions is exposed as parameter for allowing unit testing of -// the newInitOptions method, that implements all the command options validation logic +// +// the newInitOptions method, that implements all the command options validation logic func newCmdInit(out io.Writer, initOptions *initOptions) *cobra.Command { if initOptions == nil { initOptions = newInitOptions() diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index d1e347ee65d..4c3d1b9905a 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -157,7 +157,8 @@ type joinData struct { // newCmdJoin returns "kubeadm join" command. // NB. joinOptions is exposed as parameter for allowing unit testing of -// the newJoinData method, that implements all the command options validation logic +// +// the newJoinData method, that implements all the command options validation logic func newCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { if joinOptions == nil { joinOptions = newJoinOptions() diff --git a/cmd/kubeadm/app/cmd/util/join.go b/cmd/kubeadm/app/cmd/util/join.go index a22fcd3fa6e..639f5420945 100644 --- a/cmd/kubeadm/app/cmd/util/join.go +++ b/cmd/kubeadm/app/cmd/util/join.go @@ -38,13 +38,15 @@ var joinCommandTemplate = template.Must(template.New("join").Parse(`` + )) // GetJoinWorkerCommand returns the kubeadm join command for a given token and -// Kubernetes cluster (the current cluster in the kubeconfig file) +// +// Kubernetes cluster (the current cluster in the kubeconfig file) func GetJoinWorkerCommand(kubeConfigFile, token string, skipTokenPrint bool) (string, error) { return getJoinCommand(kubeConfigFile, token, "", false, skipTokenPrint, false) } // GetJoinControlPlaneCommand returns the kubeadm join command for a given token and -// Kubernetes cluster (the current cluster in the kubeconfig file) +// +// Kubernetes cluster (the current cluster in the kubeconfig file) func GetJoinControlPlaneCommand(kubeConfigFile, token, key string, skipTokenPrint, skipCertificateKeyPrint bool) (string, error) { return getJoinCommand(kubeConfigFile, token, key, true, skipTokenPrint, skipCertificateKeyPrint) } diff --git a/cmd/kubeadm/app/phases/copycerts/copycerts.go b/cmd/kubeadm/app/phases/copycerts/copycerts.go index 9f0f1121da3..c40489298e6 100644 --- a/cmd/kubeadm/app/phases/copycerts/copycerts.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts.go @@ -76,7 +76,7 @@ func createShortLivedBootstrapToken(client 
clientset.Interface) (string, error) return tokens[0].Token.ID, nil } -//CreateCertificateKey returns a cryptographically secure random key +// CreateCertificateKey returns a cryptographically secure random key func CreateCertificateKey() (string, error) { randBytes, err := cryptoutil.CreateRandBytes(kubeadmconstants.CertificateKeySize) if err != nil { @@ -85,7 +85,7 @@ func CreateCertificateKey() (string, error) { return hex.EncodeToString(randBytes), nil } -//UploadCerts save certs needs to join a new control-plane on kubeadm-certs sercret. +// UploadCerts save certs needs to join a new control-plane on kubeadm-certs sercret. func UploadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, key string) error { fmt.Printf("[upload-certs] Storing the certificates in Secret %q in the %q Namespace\n", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) decodedKey, err := hex.DecodeString(key) diff --git a/cmd/kubeadm/app/phases/kubelet/flags.go b/cmd/kubeadm/app/phases/kubelet/flags.go index c12257e2d32..e2a8fdab022 100644 --- a/cmd/kubeadm/app/phases/kubelet/flags.go +++ b/cmd/kubeadm/app/phases/kubelet/flags.go @@ -71,8 +71,8 @@ func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.ClusterConfiguration, nodeReg *k return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir) } -//buildKubeletArgMapCommon takes a kubeletFlagsOpts object and builds based on that a string-string map with flags -//that are common to both Linux and Windows +// buildKubeletArgMapCommon takes a kubeletFlagsOpts object and builds based on that a string-string map with flags +// that are common to both Linux and Windows func buildKubeletArgMapCommon(opts kubeletFlagsOpts) map[string]string { kubeletFlags := map[string]string{} kubeletFlags["container-runtime-endpoint"] = opts.nodeRegOpts.CRISocket diff --git a/cmd/kubeadm/app/util/config/cluster.go b/cmd/kubeadm/app/util/config/cluster.go index 937832ea7ac..855a87f76e3 100644 --- a/cmd/kubeadm/app/util/config/cluster.go +++ b/cmd/kubeadm/app/util/config/cluster.go @@ -160,7 +160,8 @@ func GetNodeRegistration(kubeconfigFile string, client clientset.Interface, node // getNodeNameFromKubeletConfig gets the node name from a kubelet config file // TODO: in future we want to switch to a more canonical way for doing this e.g. by having this -// information in the local kubelet config.yaml +// +// information in the local kubelet config.yaml func getNodeNameFromKubeletConfig(fileName string) (string, error) { // loads the kubelet.conf file config, err := clientcmd.LoadFromFile(fileName) diff --git a/cmd/kubeadm/app/util/version.go b/cmd/kubeadm/app/util/version.go index 38148454b10..0520b639bf1 100644 --- a/cmd/kubeadm/app/util/version.go +++ b/cmd/kubeadm/app/util/version.go @@ -57,12 +57,13 @@ var ( // servers and then return actual semantic version. // // Available names on release servers: -// stable (latest stable release) -// stable-1 (latest stable release in 1.x) -// stable-1.0 (and similarly 1.1, 1.2, 1.3, ...) -// latest (latest release, including alpha/beta) -// latest-1 (latest release in 1.x, including alpha/beta) -// latest-1.0 (and similarly 1.1, 1.2, 1.3, ...) +// +// stable (latest stable release) +// stable-1 (latest stable release in 1.x) +// stable-1.0 (and similarly 1.1, 1.2, 1.3, ...) +// latest (latest release, including alpha/beta) +// latest-1 (latest release in 1.x, including alpha/beta) +// latest-1.0 (and similarly 1.1, 1.2, 1.3, ...) 
func KubernetesReleaseVersion(version string) (string, error) { return kubernetesReleaseVersion(version, fetchFromURL) } diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index cf88e90a7b7..6a329fbe08b 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -45,9 +45,10 @@ const defaultRootDir = "/var/lib/kubelet" // KubeletFlags contains configuration flags for the Kubelet. // A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: -// - its value will never, or cannot safely be changed during the lifetime of a node, or -// - its value cannot be safely shared between nodes at the same time (e.g. a hostname); -// KubeletConfiguration is intended to be shared between nodes. +// - its value will never, or cannot safely be changed during the lifetime of a node, or +// - its value cannot be safely shared between nodes at the same time (e.g. a hostname); +// KubeletConfiguration is intended to be shared between nodes. +// // In general, please try to avoid adding flags or configuration fields, // we already have a confusingly large amount of them. type KubeletFlags struct { diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 6d064326ec7..2aeac0ccfd7 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -1075,9 +1075,11 @@ func setContentTypeForClient(cfg *restclient.Config, contentType string) { } // RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications: -// 1 Integration tests -// 2 Kubelet binary -// 3 Standalone 'kubernetes' binary +// +// 1 Integration tests +// 2 Kubelet binary +// 3 Standalone 'kubernetes' binary +// // Eventually, #2 will be replaced with instances of #3 func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error { hostname, err := nodeutil.GetHostname(kubeServer.HostnameOverride) diff --git a/pkg/apis/apps/types.go b/pkg/apis/apps/types.go index 5b99047cd49..24f78a3af7a 100644 --- a/pkg/apis/apps/types.go +++ b/pkg/apis/apps/types.go @@ -27,8 +27,9 @@ import ( // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { diff --git a/pkg/apis/autoscaling/helpers.go b/pkg/apis/autoscaling/helpers.go index 632c1983318..f66a12f4def 100644 --- a/pkg/apis/autoscaling/helpers.go +++ b/pkg/apis/autoscaling/helpers.go @@ -21,16 +21,16 @@ package autoscaling // It should always be called when converting internal -> external versions, prior // to setting any of the custom annotations: // -// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations) -// externalObj.Annotations = annotations +// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations) +// externalObj.Annotations = annotations // -// if internal.SomeField != nil { -// if !copiedAnnotations { -// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations) -// copiedAnnotations = true -// } -// externalObj.Annotations[...] = json.Marshal(...) 
-// } +// if internal.SomeField != nil { +// if !copiedAnnotations { +// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations) +// copiedAnnotations = true +// } +// externalObj.Annotations[...] = json.Marshal(...) +// } func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) { _, hasMetricsSpecs := in[MetricSpecsAnnotation] _, hasBehaviorSpecs := in[BehaviorSpecsAnnotation] diff --git a/pkg/apis/certificates/types.go b/pkg/apis/certificates/types.go index a8a9c837c16..9a43f796c16 100644 --- a/pkg/apis/certificates/types.go +++ b/pkg/apis/certificates/types.go @@ -193,7 +193,8 @@ type CertificateSigningRequestList struct { // KeyUsages specifies valid usage contexts for keys. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string const ( diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 3e33e319013..aca06e7d1d8 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -3221,8 +3221,9 @@ type PodDNSConfigOption struct { // PodIP represents the IP address of a pod. // IP address information. Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within -// the cluster. +// +// IP: An IP address allocated to the pod. Routable at least within +// the cluster. type PodIP struct { IP string } @@ -4035,17 +4036,18 @@ type ServiceAccountList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] +// +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] type Endpoints struct { metav1.TypeMeta // +optional @@ -4058,13 +4060,16 @@ type Endpoints struct { // EndpointSubset is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. 
// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } +// +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// // The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] type EndpointSubset struct { Addresses []EndpointAddress NotReadyAddresses []EndpointAddress diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index b0ad047073f..d545c7b7507 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -4083,8 +4083,9 @@ var sysctlContainSlashRegexp = regexp.MustCompile("^" + SysctlContainSlashFmt + // IsValidSysctlName checks that the given string is a valid sysctl name, // i.e. matches SysctlContainSlashFmt. // More info: -// https://man7.org/linux/man-pages/man8/sysctl.8.html -// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +// +// https://man7.org/linux/man-pages/man8/sysctl.8.html +// https://man7.org/linux/man-pages/man5/sysctl.d.5.html func IsValidSysctlName(name string) bool { if len(name) > SysctlMaxLength { return false diff --git a/pkg/apis/flowcontrol/types.go b/pkg/apis/flowcontrol/types.go index 1e896ae9c95..1370e7209db 100644 --- a/pkg/apis/flowcontrol/types.go +++ b/pkg/apis/flowcontrol/types.go @@ -391,8 +391,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/pkg/apis/flowcontrol/validation/validation.go b/pkg/apis/flowcontrol/validation/validation.go index e24e4cb9f00..5f6aa7f73d0 100644 --- a/pkg/apis/flowcontrol/validation/validation.go +++ b/pkg/apis/flowcontrol/validation/validation.go @@ -468,10 +468,10 @@ func ValidatePriorityLevelConfigurationCondition(condition *flowcontrol.Priority } // ValidateNonResourceURLPath validates non-resource-url path by following rules: -// 1. Slash must be the leading character of the path -// 2. White-space is forbidden in the path -// 3. Continuous/double slash is forbidden in the path -// 4. Wildcard "*" should only do suffix glob matching. Note that wildcard also matches slashes. +// 1. Slash must be the leading character of the path +// 2. White-space is forbidden in the path +// 3. Continuous/double slash is forbidden in the path +// 4. Wildcard "*" should only do suffix glob matching. Note that wildcard also matches slashes. 
func ValidateNonResourceURLPath(path string, fldPath *field.Path) *field.Error { if len(path) == 0 { return field.Invalid(fldPath, path, "must not be empty") diff --git a/pkg/apis/policy/validation/validation.go b/pkg/apis/policy/validation/validation.go index a3888f5df4b..57a0d46ea62 100644 --- a/pkg/apis/policy/validation/validation.go +++ b/pkg/apis/policy/validation/validation.go @@ -408,8 +408,9 @@ var sysctlContainSlashPatternRegexp = regexp.MustCompile("^" + SysctlContainSlas // IsValidSysctlPattern checks if name is a valid sysctl pattern. // i.e. matches sysctlContainSlashPatternRegexp. // More info: -// https://man7.org/linux/man-pages/man8/sysctl.8.html -// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +// +// https://man7.org/linux/man-pages/man8/sysctl.8.html +// https://man7.org/linux/man-pages/man5/sysctl.d.5.html func IsValidSysctlPattern(name string) bool { if len(name) > apivalidation.SysctlMaxLength { return false diff --git a/pkg/controller/certificates/authority/policies.go b/pkg/controller/certificates/authority/policies.go index 411fafeec4b..3a1688baae5 100644 --- a/pkg/controller/certificates/authority/policies.go +++ b/pkg/controller/certificates/authority/policies.go @@ -37,17 +37,17 @@ type SigningPolicy interface { // PermissiveSigningPolicy is the signing policy historically used by the local // signer. // -// * It forwards all SANs from the original signing request. -// * It sets allowed usages as configured in the policy. -// * It zeros all extensions. -// * It sets BasicConstraints to true. -// * It sets IsCA to false. -// * It validates that the signer has not expired. -// * It sets NotBefore and NotAfter: -// All certificates set NotBefore = Now() - Backdate. -// Long-lived certificates set NotAfter = Now() + TTL - Backdate. -// Short-lived certificates set NotAfter = Now() + TTL. -// All certificates truncate NotAfter to the expiration date of the signer. +// - It forwards all SANs from the original signing request. +// - It sets allowed usages as configured in the policy. +// - It zeros all extensions. +// - It sets BasicConstraints to true. +// - It sets IsCA to false. +// - It validates that the signer has not expired. +// - It sets NotBefore and NotAfter: +// All certificates set NotBefore = Now() - Backdate. +// Long-lived certificates set NotAfter = Now() + TTL - Backdate. +// Short-lived certificates set NotAfter = Now() + TTL. +// All certificates truncate NotAfter to the expiration date of the signer. type PermissiveSigningPolicy struct { // TTL is used in certificate NotAfter calculation as described above. TTL time.Duration diff --git a/pkg/controller/controller_ref_manager.go b/pkg/controller/controller_ref_manager.go index 94225f42baf..0b3a397f8f8 100644 --- a/pkg/controller/controller_ref_manager.go +++ b/pkg/controller/controller_ref_manager.go @@ -54,8 +54,8 @@ func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error { // ClaimObject tries to take ownership of an object for this controller. // // It will reconcile the following: -// * Adopt orphans if the match function returns true. -// * Release owned objects if the match function returns false. +// - Adopt orphans if the match function returns true. +// - Release owned objects if the match function returns false. // // A non-nil error is returned if some form of reconciliation was attempted and // failed. 
Usually, controllers should try again later in case reconciliation @@ -143,8 +143,9 @@ type PodControllerRefManager struct { // If CanAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once CanAdopt() is called, it will not be called again by the same -// PodControllerRefManager instance. Create a new instance if it makes -// sense to check CanAdopt() again (e.g. in a different sync pass). +// +// PodControllerRefManager instance. Create a new instance if it makes +// sense to check CanAdopt() again (e.g. in a different sync pass). func NewPodControllerRefManager( podControl PodControlInterface, controller metav1.Object, @@ -168,8 +169,8 @@ func NewPodControllerRefManager( // ClaimPods tries to take ownership of a list of Pods. // // It will reconcile the following: -// * Adopt orphans if the selector matches. -// * Release owned objects if the selector no longer matches. +// - Adopt orphans if the selector matches. +// - Release owned objects if the selector no longer matches. // // Optional: If one or more filters are specified, a Pod will only be claimed if // all filters return true. @@ -283,8 +284,9 @@ type ReplicaSetControllerRefManager struct { // If CanAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once CanAdopt() is called, it will not be called again by the same -// ReplicaSetControllerRefManager instance. Create a new instance if it -// makes sense to check CanAdopt() again (e.g. in a different sync pass). +// +// ReplicaSetControllerRefManager instance. Create a new instance if it +// makes sense to check CanAdopt() again (e.g. in a different sync pass). func NewReplicaSetControllerRefManager( rsControl RSControlInterface, controller metav1.Object, @@ -306,8 +308,8 @@ func NewReplicaSetControllerRefManager( // ClaimReplicaSets tries to take ownership of a list of ReplicaSets. // // It will reconcile the following: -// * Adopt orphans if the selector matches. -// * Release owned objects if the selector no longer matches. +// - Adopt orphans if the selector matches. +// - Release owned objects if the selector no longer matches. // // A non-nil error is returned if some form of reconciliation was attempted and // failed. Usually, controllers should try again later in case reconciliation @@ -421,8 +423,9 @@ type ControllerRevisionControllerRefManager struct { // If canAdopt() returns a non-nil error, all adoptions will fail. // // NOTE: Once canAdopt() is called, it will not be called again by the same -// ControllerRevisionControllerRefManager instance. Create a new instance if it -// makes sense to check canAdopt() again (e.g. in a different sync pass). +// +// ControllerRevisionControllerRefManager instance. Create a new instance if it +// makes sense to check canAdopt() again (e.g. in a different sync pass). func NewControllerRevisionControllerRefManager( crControl ControllerRevisionControlInterface, controller metav1.Object, @@ -444,8 +447,8 @@ func NewControllerRevisionControllerRefManager( // ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions. // // It will reconcile the following: -// * Adopt orphans if the selector matches. -// * Release owned objects if the selector no longer matches. +// - Adopt orphans if the selector matches. +// - Release owned objects if the selector no longer matches. // // A non-nil error is returned if some form of reconciliation was attempted and // failed. 
Usually, controllers should try again later in case reconciliation diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 97f8a03c9c6..241294cded6 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -755,24 +755,24 @@ func (s ActivePods) Less(i, j int) bool { // length. After sorting, the pods will be ordered as follows, applying each // rule in turn until one matches: // -// 1. If only one of the pods is assigned to a node, the pod that is not -// assigned comes before the pod that is. -// 2. If the pods' phases differ, a pending pod comes before a pod whose phase -// is unknown, and a pod whose phase is unknown comes before a running pod. -// 3. If exactly one of the pods is ready, the pod that is not ready comes -// before the ready pod. -// 4. If controller.kubernetes.io/pod-deletion-cost annotation is set, then -// the pod with the lower value will come first. -// 5. If the pods' ranks differ, the pod with greater rank comes before the pod -// with lower rank. -// 6. If both pods are ready but have not been ready for the same amount of -// time, the pod that has been ready for a shorter amount of time comes -// before the pod that has been ready for longer. -// 7. If one pod has a container that has restarted more than any container in -// the other pod, the pod with the container with more restarts comes -// before the other pod. -// 8. If the pods' creation times differ, the pod that was created more recently -// comes before the older pod. +// 1. If only one of the pods is assigned to a node, the pod that is not +// assigned comes before the pod that is. +// 2. If the pods' phases differ, a pending pod comes before a pod whose phase +// is unknown, and a pod whose phase is unknown comes before a running pod. +// 3. If exactly one of the pods is ready, the pod that is not ready comes +// before the ready pod. +// 4. If controller.kubernetes.io/pod-deletion-cost annotation is set, then +// the pod with the lower value will come first. +// 5. If the pods' ranks differ, the pod with greater rank comes before the pod +// with lower rank. +// 6. If both pods are ready but have not been ready for the same amount of +// time, the pod that has been ready for a shorter amount of time comes +// before the pod that has been ready for longer. +// 7. If one pod has a container that has restarted more than any container in +// the other pod, the pod with the container with more restarts comes +// before the other pod. +// 8. If the pods' creation times differ, the pod that was created more recently +// comes before the older pod. // // In 6 and 8, times are compared in a logarithmic scale. This allows a level // of randomness among equivalent Pods when sorting. If two pods have the same diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index bd36768128b..e14fbe74c99 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -58,7 +58,9 @@ func deleteFromActiveList(cj *batchv1.CronJob, uid types.UID) { } // getNextScheduleTime gets the time of next schedule after last scheduled and before now -// it returns nil if no unmet schedule times. +// +// it returns nil if no unmet schedule times. +// // If there are too many (>100) unstarted times, it will raise a warning and but still return // the list of missed times. 
func getNextScheduleTime(cj batchv1.CronJob, now time.Time, schedule cron.Schedule, recorder record.EventRecorder) (*time.Time, error) { diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 864089fefac..21bf7d4811f 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -1263,10 +1263,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string) // NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a // summary. Returned booleans are: -// * shouldRun: +// - shouldRun: // Returns true when a daemonset should run on the node if a daemonset pod is not already // running on that node. -// * shouldContinueRunning: +// - shouldContinueRunning: // Returns true when a daemonset should continue running on a node if a daemonset pod is already // running on that node. func NodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) { diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index acc90d2afc7..d5a7861ba1f 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -106,10 +106,10 @@ func (dc *DeploymentController) checkPausedConditions(ctx context.Context, d *ap // // rsList should come from getReplicaSetsForDeployment(d). // -// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). -// 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), -// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. -// 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. +// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). +// 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), +// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. +// 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. // // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d06d392e97c..cb2fac36342 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -302,7 +302,8 @@ var annotationsToSkip = map[string]bool{ // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key // TODO: How to decide which annotations should / should not be copied? 
-// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +// +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 func skipCopyAnnotation(key string) bool { return annotationsToSkip[key] } @@ -595,9 +596,9 @@ func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList // EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] // We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index 6869d4fc2ee..c7040a056ee 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -1029,7 +1029,7 @@ func TestMaxUnavailable(t *testing.T) { } } -//Set of simple tests for annotation related util functions +// Set of simple tests for annotation related util functions func TestAnnotationUtils(t *testing.T) { //Setup diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index 0a4220f6c93..e292a9839a1 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -269,7 +269,7 @@ func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) { pod.OwnerReferences = append(pod.OwnerReferences, controllerReference) } -// pod, podName := newPod(t, name) +// pod, podName := newPod(t, name) func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) { var controllerReference metav1.OwnerReference var trueVar = true diff --git a/pkg/controller/endpointslice/reconciler.go b/pkg/controller/endpointslice/reconciler.go index 96389009957..160fca2bf3c 100644 --- a/pkg/controller/endpointslice/reconciler.go +++ b/pkg/controller/endpointslice/reconciler.go @@ -389,13 +389,13 @@ func (r *reconciler) finalize( // the list of desired endpoints and returns lists of slices to create, update, // and delete. It also checks that the slices mirror the parent services labels. // The logic is split up into several main steps: -// 1. Iterate through existing slices, delete endpoints that are no longer -// desired and update matching endpoints that have changed. It also checks -// if the slices have the labels of the parent services, and updates them if not. -// 2. Iterate through slices that have been modified in 1 and fill them up with -// any remaining desired endpoints. -// 3. If there still desired endpoints left, try to fit them into a previously -// unchanged slice and/or create new ones. +// 1. Iterate through existing slices, delete endpoints that are no longer +// desired and update matching endpoints that have changed. It also checks +// if the slices have the labels of the parent services, and updates them if not. +// 2. Iterate through slices that have been modified in 1 and fill them up with +// any remaining desired endpoints. +// 3. 
If there still desired endpoints left, try to fit them into a previously +// unchanged slice and/or create new ones. func (r *reconciler) reconcileByPortMapping( service *corev1.Service, existingSlices []*discovery.EndpointSlice, diff --git a/pkg/controller/endpointslice/topologycache/sliceinfo.go b/pkg/controller/endpointslice/topologycache/sliceinfo.go index 6bfd350fdf7..8b52815f996 100644 --- a/pkg/controller/endpointslice/topologycache/sliceinfo.go +++ b/pkg/controller/endpointslice/topologycache/sliceinfo.go @@ -47,11 +47,11 @@ func (si *SliceInfo) getTotalReadyEndpoints() int { // getAllocatedHintsByZone sums up the allocated hints we currently have in // unchanged slices and marks slices for update as necessary. A slice needs to // be updated if any of the following are true: -// - It has an endpoint without zone hints -// - It has an endpoint hint for a zone that no longer needs any -// - It has endpoint hints that would make the minimum allocations necessary -// impossible with changes to slices that are already being updated or -// created. +// - It has an endpoint without zone hints +// - It has an endpoint hint for a zone that no longer needs any +// - It has endpoint hints that would make the minimum allocations necessary +// impossible with changes to slices that are already being updated or +// created. func (si *SliceInfo) getAllocatedHintsByZone(allocations map[string]Allocation) EndpointZoneInfo { allocatedHintsByZone := EndpointZoneInfo{} diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index a3673241144..4a3f8c71246 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -981,11 +981,12 @@ func (jm *Controller) removeTrackingFinalizersFromAllPods(ctx context.Context, p } // trackJobStatusAndRemoveFinalizers does: -// 1. Add finished Pods to .status.uncountedTerminatedPods -// 2. Remove the finalizers from the Pods if they completed or were removed -// or the job was removed. -// 3. Increment job counters for pods that no longer have a finalizer. -// 4. Add Complete condition if satisfied with current counters. +// 1. Add finished Pods to .status.uncountedTerminatedPods +// 2. Remove the finalizers from the Pods if they completed or were removed +// or the job was removed. +// 3. Increment job counters for pods that no longer have a finalizer. +// 4. Add Complete condition if satisfied with current counters. +// // It does this up to a limited number of Pods so that the size of .status // doesn't grow too much and this sync doesn't starve other Jobs. func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job *batch.Job, pods []*v1.Pod, succeededIndexes orderedIntervals, uncounted uncountedTerminatedPods, expectedRmFinalizers sets.String, finishedCond *batch.JobCondition, needsFlush bool) error { @@ -1076,12 +1077,13 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job } // flushUncountedAndRemoveFinalizers does: -// 1. flush the Job status that might include new uncounted Pod UIDs. -// 2. perform the removal of finalizers from Pods which are in the uncounted -// lists. -// 3. update the counters based on the Pods for which it successfully removed -// the finalizers. -// 4. (if not all removals succeeded) flush Job status again. +// 1. flush the Job status that might include new uncounted Pod UIDs. +// 2. perform the removal of finalizers from Pods which are in the uncounted +// lists. +// 3. 
update the counters based on the Pods for which it successfully removed +// the finalizers. +// 4. (if not all removals succeeded) flush Job status again. +// // Returns whether there are pending changes in the Job status that need to be // flushed in subsequent calls. func (jm *Controller) flushUncountedAndRemoveFinalizers(ctx context.Context, job *batch.Job, podsToRemoveFinalizer []*v1.Pod, uidsWithFinalizer sets.String, oldCounters *batch.JobStatus, needsFlush bool) (*batch.Job, bool, error) { diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go index 7f97f34fdb9..4e8dd5b3a13 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go +++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go @@ -81,10 +81,11 @@ type namespacedResourcesDeleter struct { // Delete deletes all resources in the given namespace. // Before deleting resources: -// * It ensures that deletion timestamp is set on the -// namespace (does nothing if deletion timestamp is missing). -// * Verifies that the namespace is in the "terminating" phase -// (updates the namespace phase if it is not yet marked terminating) +// - It ensures that deletion timestamp is set on the +// namespace (does nothing if deletion timestamp is missing). +// - Verifies that the namespace is in the "terminating" phase +// (updates the namespace phase if it is not yet marked terminating) +// // After deleting the resources: // * It removes finalizer token from the given namespace. // @@ -339,9 +340,10 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes // listCollection will list the items in the specified namespace // it returns the following: -// the list of items in the collection (if found) -// a boolean if the operation is supported -// an error if the operation is supported but could not be completed. +// +// the list of items in the collection (if found) +// a boolean if the operation is supported +// an error if the operation is supported but could not be completed. func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*metav1.PartialObjectMetadataList, bool, error) { klog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) diff --git a/pkg/controller/nodeipam/ipam/doc.go b/pkg/controller/nodeipam/ipam/doc.go index 76e51570760..eab481f930b 100644 --- a/pkg/controller/nodeipam/ipam/doc.go +++ b/pkg/controller/nodeipam/ipam/doc.go @@ -17,14 +17,14 @@ limitations under the License. // Package ipam provides different allocators for assigning IP ranges to nodes. // We currently support several kinds of IPAM allocators (these are denoted by // the CIDRAllocatorType): -// - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works -// in conjunction with the RouteController to configure the network to get -// connectivity. -// - CloudAllocator is an allocator that synchronizes PodCIDRs from IP -// ranges assignments from the underlying cloud platform. -// - (Alpha only) IPAMFromCluster is an allocator that has the similar -// functionality as the RangeAllocator but also synchronizes cluster-managed -// ranges into the cloud platform. -// - (Alpha only) IPAMFromCloud is the same as CloudAllocator (synchronizes -// from cloud into the cluster.) 
+// - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works +// in conjunction with the RouteController to configure the network to get +// connectivity. +// - CloudAllocator is an allocator that synchronizes PodCIDRs from IP +// ranges assignments from the underlying cloud platform. +// - (Alpha only) IPAMFromCluster is an allocator that has the similar +// functionality as the RangeAllocator but also synchronizes cluster-managed +// ranges into the cloud platform. +// - (Alpha only) IPAMFromCloud is the same as CloudAllocator (synchronizes +// from cloud into the cluster.) package ipam diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index c1c232e0437..356cc29aedf 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -133,9 +133,9 @@ const ( // labelReconcileInfo lists Node labels to reconcile, and how to reconcile them. // primaryKey and secondaryKey are keys of labels to reconcile. // - If both keys exist, but their values don't match. Use the value from the -// primaryKey as the source of truth to reconcile. +// primaryKey as the source of truth to reconcile. // - If ensureSecondaryExists is true, and the secondaryKey does not -// exist, secondaryKey will be added with the value of the primaryKey. +// exist, secondaryKey will be added with the value of the primaryKey. var labelReconcileInfo = []struct { primaryKey string secondaryKey string @@ -1375,9 +1375,9 @@ func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneStat } // classifyNodes classifies the allNodes to three categories: -// 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet' -// 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes' -// 3. newZoneRepresentatives: the nodes that in both 'knownNodeSet' and 'allNodes', but no zone states +// 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet' +// 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes' +// 3. newZoneRepresentatives: the nodes that in both 'knownNodeSet' and 'allNodes', but no zone states func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZoneRepresentatives []*v1.Node) { for i := range allNodes { if _, has := nc.knownNodeSet[allNodes[i].Name]; !has { @@ -1464,10 +1464,10 @@ func (nc *Controller) cancelPodEviction(node *v1.Node) bool { } // evictPods: -// - adds node to evictor queue if the node is not marked as evicted. -// Returns false if the node name was already enqueued. -// - deletes pods immediately if node is already marked as evicted. -// Returns false, because the node wasn't added to the queue. +// - adds node to evictor queue if the node is not marked as evicted. +// Returns false if the node name was already enqueued. +// - deletes pods immediately if node is already marked as evicted. +// Returns false, because the node wasn't added to the queue. 
func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.Pod) (bool, error) { status, ok := nc.nodeEvictionMap.getStatus(node.Name) if ok && status == evicted { diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index dbc664ee1bc..4d2a04faaf5 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -3088,18 +3088,23 @@ func generateScalingRules(pods, podsPeriod, percent, percentPeriod, stabilizatio } // generateEventsUniformDistribution generates events that uniformly spread in the time window -// time.Now()-periodSeconds ; time.Now() +// +// time.Now()-periodSeconds ; time.Now() +// // It split the time window into several segments (by the number of events) and put the event in the center of the segment // it is needed if you want to create events for several policies (to check how "outdated" flag is set). // E.g. generateEventsUniformDistribution([]int{1,2,3,4}, 120) will spread events uniformly for the last 120 seconds: // -// 1 2 3 4 +// 1 2 3 4 +// // ----------------------------------------------- -// ^ ^ ^ ^ ^ +// +// ^ ^ ^ ^ ^ +// // -120s -90s -60s -30s now() // And we can safely have two different stabilizationWindows: -// - 60s (guaranteed to have last half of events) -// - 120s (guaranteed to have all events) +// - 60s (guaranteed to have last half of events) +// - 120s (guaranteed to have all events) func generateEventsUniformDistribution(rawEvents []int, periodSeconds int) []timestampedScaleEvent { events := make([]timestampedScaleEvent, len(rawEvents)) segmentDuration := float64(periodSeconds) / float64(len(rawEvents)) diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index 7c1b5c287c2..214d5e18ebc 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -290,7 +290,8 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) { // It also reconciles ControllerRef by adopting/orphaning. // // NOTE: Returned Pods are pointers to objects from the cache. -// If you need to modify one, you need to copy it first. +// +// If you need to modify one, you need to copy it first. func (ssc *StatefulSetController) getPodsForStatefulSet(ctx context.Context, set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) { // List all pods to include the pods that don't match the selector anymore but // has a ControllerRef pointing to this StatefulSet. diff --git a/pkg/controller/statefulset/stateful_set_utils.go b/pkg/controller/statefulset/stateful_set_utils.go index 2f3eec47055..cce03a77eae 100644 --- a/pkg/controller/statefulset/stateful_set_utils.go +++ b/pkg/controller/statefulset/stateful_set_utils.go @@ -79,7 +79,7 @@ func getParentName(pod *v1.Pod) string { return parent } -// getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned. +// getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned. 
func getOrdinal(pod *v1.Pod) int { _, ordinal := getParentNameAndOrdinal(pod) return ordinal diff --git a/pkg/controller/util/endpointslice/endpointslice_tracker.go b/pkg/controller/util/endpointslice/endpointslice_tracker.go index 2ab9dc1326a..06ae0fe932e 100644 --- a/pkg/controller/util/endpointslice/endpointslice_tracker.go +++ b/pkg/controller/util/endpointslice/endpointslice_tracker.go @@ -81,11 +81,11 @@ func (est *EndpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSli } // StaleSlices returns true if any of the following are true: -// 1. One or more of the provided EndpointSlices have older generations than the -// corresponding tracked ones. -// 2. The tracker is expecting one or more of the provided EndpointSlices to be -// deleted. (EndpointSlices that have already been marked for deletion are ignored here.) -// 3. The tracker is tracking EndpointSlices that have not been provided. +// 1. One or more of the provided EndpointSlices have older generations than the +// corresponding tracked ones. +// 2. The tracker is expecting one or more of the provided EndpointSlices to be +// deleted. (EndpointSlices that have already been marked for deletion are ignored here.) +// 3. The tracker is tracking EndpointSlices that have not been provided. func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool { est.lock.Lock() defer est.lock.Unlock() diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 1a95a0f140e..31d898f1f2d 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -693,8 +693,9 @@ func (adc *attachDetachController) processVolumesInUse( // For each VA object, this function checks if its present in the ASW. // If not, adds the volume to ASW as an "uncertain" attachment. // In the reconciler, the logic checks if the volume is present in the DSW; -// if yes, the reconciler will attempt attach on the volume; -// if not (could be a dangling attachment), the reconciler will detach this volume. +// +// if yes, the reconciler will attempt attach on the volume; +// if not (could be a dangling attachment), the reconciler will detach this volume. 
func (adc *attachDetachController) processVolumeAttachments() error { vas, err := adc.volumeAttachmentLister.List(labels.Everything()) if err != nil { diff --git a/pkg/controller/volume/attachdetach/metrics/metrics.go b/pkg/controller/volume/attachdetach/metrics/metrics.go index a2dbed738f2..b3993fccd69 100644 --- a/pkg/controller/volume/attachdetach/metrics/metrics.go +++ b/pkg/controller/volume/attachdetach/metrics/metrics.go @@ -92,10 +92,11 @@ type attachDetachStateCollector struct { } // volumeCount is a map of maps used as a counter, e.g.: -// node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use: -// {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}} -// state actual_state_of_world contains a total of 10 EBS volumes: -// {"actual_state_of_world": {"aws-ebs": 10}} +// +// node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use: +// {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}} +// state actual_state_of_world contains a total of 10 EBS volumes: +// {"actual_state_of_world": {"aws-ebs": 10}} type volumeCount map[string]map[string]int64 func (v volumeCount) add(typeKey, counterKey string) { diff --git a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go index 47c4f89fd2a..9ee2dab3bb0 100644 --- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go +++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go @@ -48,9 +48,13 @@ type DesiredStateOfWorldPopulator interface { // NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator. // loopSleepDuration - the amount of time the populator loop sleeps between -// successive executions +// +// successive executions +// // podManager - the kubelet podManager that is the source of truth for the pods -// that exist on this host +// +// that exist on this host +// // desiredStateOfWorld - the cache to populate func NewDesiredStateOfWorldPopulator( loopSleepDuration time.Duration, diff --git a/pkg/controller/volume/persistentvolume/binder_test.go b/pkg/controller/volume/persistentvolume/binder_test.go index 7af3617ed34..76ecc458ab5 100644 --- a/pkg/controller/volume/persistentvolume/binder_test.go +++ b/pkg/controller/volume/persistentvolume/binder_test.go @@ -26,10 +26,10 @@ import ( ) // Test single call to syncClaim and syncVolume methods. -// 1. Fill in the controller with initial data -// 2. Call the tested function (syncClaim/syncVolume) via -// controllerTest.testCall *once*. -// 3. Compare resulting volumes and claims with expected volumes and claims. +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncClaim/syncVolume) via +// controllerTest.testCall *once*. +// 3. Compare resulting volumes and claims with expected volumes and claims. func TestSync(t *testing.T) { labels := map[string]string{ "foo": "true", @@ -842,17 +842,18 @@ func TestSyncBlockVolume(t *testing.T) { // Test multiple calls to syncClaim/syncVolume and periodic sync of all // volume/claims. The test follows this pattern: -// 0. Load the controller with initial data. -// 1. Call controllerTest.testCall() once as in TestSync() -// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, -// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" -// events). Go to 2. if these calls change anything. -// 3. 
When all changes are processed and no new changes were made, call -// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). -// 4. If some changes were done by step 3., go to 2. (simulation of -// "volume/claim updated" events, eventually performing step 3. again) -// 5. When 3. does not do any changes, finish the tests and compare final set -// of volumes/claims with expected claims/volumes and report differences. +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// // Some limit of calls in enforced to prevent endless loops. func TestMultiSync(t *testing.T) { tests := []controllerTest{ diff --git a/pkg/controller/volume/persistentvolume/delete_test.go b/pkg/controller/volume/persistentvolume/delete_test.go index 4b4b55695e8..0ff670c5fad 100644 --- a/pkg/controller/volume/persistentvolume/delete_test.go +++ b/pkg/controller/volume/persistentvolume/delete_test.go @@ -209,17 +209,18 @@ func TestDeleteSync(t *testing.T) { // Test multiple calls to syncClaim/syncVolume and periodic sync of all // volume/claims. The test follows this pattern: -// 0. Load the controller with initial data. -// 1. Call controllerTest.testCall() once as in TestSync() -// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, -// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" -// events). Go to 2. if these calls change anything. -// 3. When all changes are processed and no new changes were made, call -// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). -// 4. If some changes were done by step 3., go to 2. (simulation of -// "volume/claim updated" events, eventually performing step 3. again) -// 5. When 3. does not do any changes, finish the tests and compare final set -// of volumes/claims with expected claims/volumes and report differences. +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// // Some limit of calls in enforced to prevent endless loops. 
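The multi-sync pattern restated above (re-sync every changed object until nothing changes, run a periodic sync over everything, and repeat until the periodic sync is a no-op) reduces to a small driver loop. The sketch below is illustrative only; runUntilQuiescent, syncChanged and syncAll are hypothetical stand-ins for the test harness, not functions from this package.

package pvsyncsketch

import "fmt"

// runUntilQuiescent drives steps 2-5 described above. syncChanged re-syncs
// only objects modified by earlier syncs and reports how many it touched;
// syncAll simulates the periodic sync over every object and reports how many
// it changed. maxCalls bounds the loop, mirroring the call limit that guards
// against endless loops.
func runUntilQuiescent(syncChanged, syncAll func() int, maxCalls int) error {
	calls := 0
	for {
		// Step 2: keep handling "object changed" events until quiescent.
		for syncChanged() > 0 {
			if calls++; calls > maxCalls {
				return fmt.Errorf("more than %d sync calls, possible endless loop", maxCalls)
			}
		}
		// Step 3: periodic sync over all objects.
		if calls++; calls > maxCalls {
			return fmt.Errorf("more than %d sync calls, possible endless loop", maxCalls)
		}
		if syncAll() == 0 {
			// Step 5: nothing changed; the caller now compares final state.
			return nil
		}
		// Step 4: the periodic sync changed something; go back to step 2.
	}
}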
func TestDeleteMultiSync(t *testing.T) { tests := []controllerTest{ diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 1207b0d566f..ebf98ab8558 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -71,9 +71,10 @@ func init() { // function to call as the actual test. Available functions are: // - testSyncClaim - calls syncClaim on the first claim in initialClaims. // - testSyncClaimError - calls syncClaim on the first claim in initialClaims -// and expects an error to be returned. +// and expects an error to be returned. // - testSyncVolume - calls syncVolume on the first volume in initialVolumes. // - any custom function for specialized tests. +// // The test then contains list of volumes/claims that are expected at the end // of the test and list of generated events. type controllerTest struct { @@ -602,10 +603,10 @@ var ( ) // wrapTestWithPluginCalls returns a testCall that: -// - configures controller with a volume plugin that implements recycler, -// deleter and provisioner. The plugin returns provided errors when a volume -// is deleted, recycled or provisioned. -// - calls given testCall +// - configures controller with a volume plugin that implements recycler, +// deleter and provisioner. The plugin returns provided errors when a volume +// is deleted, recycled or provisioned. +// - calls given testCall func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { plugin := &mockVolumePlugin{ @@ -619,9 +620,9 @@ func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, } // wrapTestWithReclaimCalls returns a testCall that: -// - configures controller with recycler or deleter which will return provided -// errors when a volume is deleted or recycled -// - calls given testCall +// - configures controller with recycler or deleter which will return provided +// errors when a volume is deleted or recycled +// - calls given testCall func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { if operation == operationDelete { return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap) @@ -631,9 +632,9 @@ func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls [] } // wrapTestWithProvisionCalls returns a testCall that: -// - configures controller with a provisioner which will return provided errors -// when a claim is provisioned -// - calls given testCall +// - configures controller with a provisioner which will return provided errors +// when a claim is provisioned +// - calls given testCall func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall { return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap) } @@ -664,11 +665,11 @@ func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall { } // wrapTestWithInjectedOperation returns a testCall that: -// - starts the controller and lets it run original testCall until -// scheduleOperation() call. It blocks the controller there and calls the -// injected function to simulate that something is happening when the -// controller waits for the operation lock. 
Controller is then resumed and we -// check how it behaves. +// - starts the controller and lets it run original testCall until +// scheduleOperation() call. It blocks the controller there and calls the +// injected function to simulate that something is happening when the +// controller waits for the operation lock. Controller is then resumed and we +// check how it behaves. func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { @@ -716,10 +717,10 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo // Test single call to syncClaim and syncVolume methods. // For all tests: -// 1. Fill in the controller with initial data -// 2. Call the tested function (syncClaim/syncVolume) via -// controllerTest.testCall *once*. -// 3. Compare resulting volumes and claims with expected volumes and claims. +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncClaim/syncVolume) via +// controllerTest.testCall *once*. +// 3. Compare resulting volumes and claims with expected volumes and claims. func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) { doit := func(t *testing.T, test controllerTest) { // Initialize the controller @@ -783,17 +784,18 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag // Test multiple calls to syncClaim/syncVolume and periodic sync of all // volume/claims. For all tests, the test follows this pattern: -// 0. Load the controller with initial data. -// 1. Call controllerTest.testCall() once as in TestSync() -// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, -// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" -// events). Go to 2. if these calls change anything. -// 3. When all changes are processed and no new changes were made, call -// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). -// 4. If some changes were done by step 3., go to 2. (simulation of -// "volume/claim updated" events, eventually performing step 3. again) -// 5. When 3. does not do any changes, finish the tests and compare final set -// of volumes/claims with expected claims/volumes and report differences. +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// // Some limit of calls in enforced to prevent endless loops. 
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) { run := func(t *testing.T, test controllerTest) { diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go index 2c5dc9e593f..0e6ecbeee8d 100644 --- a/pkg/controller/volume/persistentvolume/index.go +++ b/pkg/controller/volume/persistentvolume/index.go @@ -127,23 +127,23 @@ func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.Per // A request for RWO could be satisfied by both sets of indexed volumes, so // allPossibleMatchingAccessModes returns: // -// [][]v1.PersistentVolumeAccessMode { -// []v1.PersistentVolumeAccessMode { -// v1.ReadWriteOnce, v1.ReadOnlyMany, -// }, -// []v1.PersistentVolumeAccessMode { -// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, -// }, -// } +// [][]v1.PersistentVolumeAccessMode { +// []v1.PersistentVolumeAccessMode { +// v1.ReadWriteOnce, v1.ReadOnlyMany, +// }, +// []v1.PersistentVolumeAccessMode { +// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, +// }, +// } // // A request for RWX can be satisfied by only one set of indexed volumes, so // the return is: // -// [][]v1.PersistentVolumeAccessMode { -// []v1.PersistentVolumeAccessMode { -// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, -// }, -// } +// [][]v1.PersistentVolumeAccessMode { +// []v1.PersistentVolumeAccessMode { +// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, +// }, +// } // // This func returns modes with ascending levels of modes to give the user // what is closest to what they actually asked for. diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index 1ba073ed18b..68903f450b3 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -559,17 +559,18 @@ func TestProvisionSync(t *testing.T) { // Test multiple calls to syncClaim/syncVolume and periodic sync of all // volume/claims. The test follows this pattern: -// 0. Load the controller with initial data. -// 1. Call controllerTest.testCall() once as in TestSync() -// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, -// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" -// events). Go to 2. if these calls change anything. -// 3. When all changes are processed and no new changes were made, call -// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). -// 4. If some changes were done by step 3., go to 2. (simulation of -// "volume/claim updated" events, eventually performing step 3. again) -// 5. When 3. does not do any changes, finish the tests and compare final set -// of volumes/claims with expected claims/volumes and report differences. +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. 
does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// // Some limit of calls in enforced to prevent endless loops. func TestProvisionMultiSync(t *testing.T) { tests := []controllerTest{ diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 2ff5b5138ec..381633c4f0a 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -756,9 +756,10 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // updateClaimStatus saves new claim.Status to API server. // Parameters: -// claim - claim to update -// phase - phase to set -// volume - volume which Capacity is set into claim.Status.Capacity +// +// claim - claim to update +// phase - phase to set +// volume - volume which Capacity is set into claim.Status.Capacity func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) @@ -840,10 +841,11 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo // given event on the claim. It saves the status and emits the event only when // the status has actually changed from the version saved in API server. // Parameters: -// claim - claim to update -// phase - phase to set -// volume - volume which Capacity is set into claim.Status.Capacity -// eventtype, reason, message - event to send, see EventRecorder.Event() +// +// claim - claim to update +// phase - phase to set +// volume - volume which Capacity is set into claim.Status.Capacity +// eventtype, reason, message - event to send, see EventRecorder.Event() func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) if claim.Status.Phase == phase { diff --git a/pkg/controller/volume/persistentvolume/recycle_test.go b/pkg/controller/volume/persistentvolume/recycle_test.go index 84793fe8ed9..015248922a3 100644 --- a/pkg/controller/volume/persistentvolume/recycle_test.go +++ b/pkg/controller/volume/persistentvolume/recycle_test.go @@ -236,17 +236,18 @@ func TestRecycleSync(t *testing.T) { // Test multiple calls to syncClaim/syncVolume and periodic sync of all // volume/claims. The test follows this pattern: -// 0. Load the controller with initial data. -// 1. Call controllerTest.testCall() once as in TestSync() -// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, -// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" -// events). Go to 2. if these calls change anything. -// 3. When all changes are processed and no new changes were made, call -// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). -// 4. If some changes were done by step 3., go to 2. (simulation of -// "volume/claim updated" events, eventually performing step 3. again) -// 5. When 3. does not do any changes, finish the tests and compare final set -// of volumes/claims with expected claims/volumes and report differences. 
+// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// // Some limit of calls in enforced to prevent endless loops. func TestRecycleMultiSync(t *testing.T) { tests := []controllerTest{ diff --git a/pkg/controller/volume/persistentvolume/testing/testing.go b/pkg/controller/volume/persistentvolume/testing/testing.go index dd4a29cf8b6..4f24b5c46a9 100644 --- a/pkg/controller/volume/persistentvolume/testing/testing.go +++ b/pkg/controller/volume/persistentvolume/testing/testing.go @@ -41,21 +41,21 @@ var ErrVersionConflict = errors.New("VersionError") // VolumeReactor is a core.Reactor that simulates etcd and API server. It // stores: -// - Latest version of claims volumes saved by the controller. -// - Queue of all saves (to simulate "volume/claim updated" events). This queue -// contains all intermediate state of an object - e.g. a claim.VolumeName -// is updated first and claim.Phase second. This queue will then contain both -// updates as separate entries. -// - Number of changes since the last call to VolumeReactor.syncAll(). -// - Optionally, volume and claim fake watchers which should be the same ones -// used by the controller. Any time an event function like deleteVolumeEvent -// is called to simulate an event, the reactor's stores are updated and the -// controller is sent the event via the fake watcher. -// - Optionally, list of error that should be returned by reactor, simulating -// etcd / API server failures. These errors are evaluated in order and every -// error is returned only once. I.e. when the reactor finds matching -// ReactorError, it return appropriate error and removes the ReactorError from -// the list. +// - Latest version of claims volumes saved by the controller. +// - Queue of all saves (to simulate "volume/claim updated" events). This queue +// contains all intermediate state of an object - e.g. a claim.VolumeName +// is updated first and claim.Phase second. This queue will then contain both +// updates as separate entries. +// - Number of changes since the last call to VolumeReactor.syncAll(). +// - Optionally, volume and claim fake watchers which should be the same ones +// used by the controller. Any time an event function like deleteVolumeEvent +// is called to simulate an event, the reactor's stores are updated and the +// controller is sent the event via the fake watcher. +// - Optionally, list of error that should be returned by reactor, simulating +// etcd / API server failures. These errors are evaluated in order and every +// error is returned only once. I.e. when the reactor finds matching +// ReactorError, it return appropriate error and removes the ReactorError from +// the list. 
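The one-shot error injection described above (errors are matched in order and each is returned at most once) amounts to a small consumable queue. This is only a sketch with simplified placeholder types, not the real pvtesting.VolumeReactor or its ReactorError.

package reactorsketch

// injectedError is a simplified placeholder for one queued reactor error.
type injectedError struct {
	verb, resource string
	err            error
}

type errorQueue struct {
	errors []injectedError
}

// nextError returns the first queued error matching verb/resource and removes
// it from the queue, so each injected error is observed exactly once.
func (q *errorQueue) nextError(verb, resource string) error {
	for i, e := range q.errors {
		if e.verb == verb && e.resource == resource {
			q.errors = append(q.errors[:i], q.errors[i+1:]...)
			return e.err
		}
	}
	return nil
}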
type VolumeReactor struct { volumes map[string]*v1.PersistentVolume claims map[string]*v1.PersistentVolumeClaim diff --git a/pkg/controlplane/instance.go b/pkg/controlplane/instance.go index 8f169dfc573..2eca86aba69 100644 --- a/pkg/controlplane/instance.go +++ b/pkg/controlplane/instance.go @@ -326,7 +326,8 @@ func (c *Config) Complete() CompletedConfig { // New returns a new instance of Master from the given config. // Certain config fields will be set to a default value if unset. // Certain config fields must be specified, including: -// KubeletClientConfig +// +// KubeletClientConfig func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Instance, error) { if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) { return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig") diff --git a/pkg/controlplane/reconcilers/instancecount.go b/pkg/controlplane/reconcilers/instancecount.go index 6b36747ea6f..6b6282800a9 100644 --- a/pkg/controlplane/reconcilers/instancecount.go +++ b/pkg/controlplane/reconcilers/instancecount.go @@ -53,12 +53,12 @@ func NewMasterCountEndpointReconciler(masterCount int, epAdapter EndpointsAdapte // understand the requirements and the body of this function. // // Requirements: -// * All apiservers MUST use the same ports for their {rw, ro} services. -// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the -// endpoints for their {rw, ro} services. -// * All apiservers MUST know and agree on the number of apiservers expected -// to be running (c.masterCount). -// * ReconcileEndpoints is called periodically from all apiservers. +// - All apiservers MUST use the same ports for their {rw, ro} services. +// - All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the +// endpoints for their {rw, ro} services. +// - All apiservers MUST know and agree on the number of apiservers expected +// to be running (c.masterCount). +// - ReconcileEndpoints is called periodically from all apiservers. func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []corev1.EndpointPort, reconcilePorts bool) error { r.reconcilingLock.Lock() defer r.reconcilingLock.Unlock() @@ -187,10 +187,10 @@ func (r *masterCountEndpointReconciler) Destroy() { // Determine if the endpoint is in the format ReconcileEndpoints expects. // // Return values: -// * formatCorrect is true if exactly one subset is found. -// * ipCorrect is true when current master's IP is found and the number +// - formatCorrect is true if exactly one subset is found. +// - ipCorrect is true when current master's IP is found and the number // of addresses is less than or equal to the master count. -// * portsCorrect is true when endpoint ports exactly match provided ports. +// - portsCorrect is true when endpoint ports exactly match provided ports. // portsCorrect is only evaluated when reconcilePorts is set to true. 
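The three return values documented above for the subset-format check can be illustrated with simplified placeholder types (the real code inspects corev1.Endpoints); this sketch only restates the documented semantics, it is not the reconciler's implementation.

package reconcilersketch

// endpointSubset is a placeholder for one Endpoints subset.
type endpointSubset struct {
	addresses []string
	ports     []int32
}

// checkSubsetFormat mirrors the documented results: formatCorrect requires
// exactly one subset; ipCorrect requires this apiserver's IP to be present and
// the address count not to exceed the expected master count; portsCorrect is
// an exact port match and is only evaluated when reconcilePorts is true.
func checkSubsetFormat(subsets []endpointSubset, myIP string, wantPorts []int32, masterCount int, reconcilePorts bool) (formatCorrect, ipCorrect, portsCorrect bool) {
	if len(subsets) != 1 {
		return false, false, false
	}
	s := subsets[0]
	found := false
	for _, a := range s.addresses {
		if a == myIP {
			found = true
			break
		}
	}
	ipCorrect = found && len(s.addresses) <= masterCount
	portsCorrect = true
	if reconcilePorts {
		portsCorrect = len(s.ports) == len(wantPorts)
		for i := 0; portsCorrect && i < len(wantPorts); i++ {
			if s.ports[i] != wantPorts[i] {
				portsCorrect = false
			}
		}
	}
	return true, ipCorrect, portsCorrect
}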
func checkEndpointSubsetFormat(e *corev1.Endpoints, ip string, ports []corev1.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) { if len(e.Subsets) != 1 { diff --git a/pkg/controlplane/reconcilers/lease.go b/pkg/controlplane/reconcilers/lease.go index 9b07081677a..0d38155c963 100644 --- a/pkg/controlplane/reconcilers/lease.go +++ b/pkg/controlplane/reconcilers/lease.go @@ -262,9 +262,9 @@ func (r *leaseEndpointReconciler) doReconcile(serviceName string, endpointPorts // format ReconcileEndpoints expects when the controller is using leases. // // Return values: -// * formatCorrect is true if exactly one subset is found. -// * ipsCorrect when the addresses in the endpoints match the expected addresses list -// * portsCorrect is true when endpoint ports exactly match provided ports. +// - formatCorrect is true if exactly one subset is found. +// - ipsCorrect when the addresses in the endpoints match the expected addresses list +// - portsCorrect is true when endpoint ports exactly match provided ports. // portsCorrect is only evaluated when reconcilePorts is set to true. func checkEndpointSubsetFormatWithLease(e *corev1.Endpoints, expectedIPs []string, ports []corev1.EndpointPort, reconcilePorts bool) (formatCorrect bool, ipsCorrect bool, portsCorrect bool) { if len(e.Subsets) != 1 { diff --git a/pkg/credentialprovider/config.go b/pkg/credentialprovider/config.go index 8b8ef911518..dccfea1cd20 100644 --- a/pkg/credentialprovider/config.go +++ b/pkg/credentialprovider/config.go @@ -84,12 +84,12 @@ func GetPreferredDockercfgPath() string { return preferredPath } -//DefaultDockercfgPaths returns default search paths of .dockercfg +// DefaultDockercfgPaths returns default search paths of .dockercfg func DefaultDockercfgPaths() []string { return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath} } -//DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json +// DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json func DefaultDockerConfigJSONPaths() []string { return []string{GetPreferredDockercfgPath(), workingDirPath, homeJSONDirPath, rootJSONDirPath} } @@ -156,7 +156,7 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error } -//ReadSpecificDockerConfigJSONFile attempts to read docker configJSON from a given file path. +// ReadSpecificDockerConfigJSONFile attempts to read docker configJSON from a given file path. func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) { var contents []byte diff --git a/pkg/credentialprovider/gcp/metadata.go b/pkg/credentialprovider/gcp/metadata.go index d87bd5a9440..e71cb2f24d6 100644 --- a/pkg/credentialprovider/gcp/metadata.go +++ b/pkg/credentialprovider/gcp/metadata.go @@ -110,8 +110,9 @@ type DockerConfigURLKeyProvider struct { } // ContainerRegistryProvider is a DockerConfigProvider that provides a dockercfg with: -// Username: "_token" -// Password: "{access token from metadata}" +// +// Username: "_token" +// Password: "{access token from metadata}" type ContainerRegistryProvider struct { MetadataProvider } diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index 4bbe5c4d9f1..0c5b3a0c934 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -31,9 +31,9 @@ import ( // reverse index across the registry endpoints. A registry endpoint is made // up of a host (e.g. 
registry.example.com), but it may also contain a path // (e.g. registry.example.com/foo) This index is important for two reasons: -// - registry endpoints may overlap, and when this happens we must find the -// most specific match for a given image -// - iterating a map does not yield predictable results +// - registry endpoints may overlap, and when this happens we must find the +// most specific match for a given image +// - iterating a map does not yield predictable results type DockerKeyring interface { Lookup(image string) ([]AuthConfig, bool) } @@ -197,8 +197,9 @@ func URLsMatchStr(glob string, target string) (bool, error) { // glob wild cards in the host name. // // Examples: -// globURL=*.docker.io, targetURL=blah.docker.io => match -// globURL=*.docker.io, targetURL=not.right.io => no match +// +// globURL=*.docker.io, targetURL=blah.docker.io => match +// globURL=*.docker.io, targetURL=not.right.io => no match // // Note that we don't support wildcards in ports and paths yet. func URLsMatch(globURL *url.URL, targetURL *url.URL) (bool, error) { diff --git a/pkg/credentialprovider/plugin/plugin.go b/pkg/credentialprovider/plugin/plugin.go index 297f3164aa9..5e6db98b465 100644 --- a/pkg/credentialprovider/plugin/plugin.go +++ b/pkg/credentialprovider/plugin/plugin.go @@ -367,7 +367,7 @@ type execPlugin struct { // ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig: // -// $ ENV_NAME=ENV_VALUE args[0] args[1] << args[0] args[1] << ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + Description: "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", OneOf: common.GenerateOpenAPIV3OneOfSchema(resource.Quantity{}.OpenAPIV3OneOfTypes()), Format: resource.Quantity{}.OpenAPISchemaFormat(), }, @@ -43729,7 +43729,7 @@ func schema_apimachinery_pkg_api_resource_Quantity(ref common.ReferenceCallback) }, common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", Type: resource.Quantity{}.OpenAPISchemaType(), Format: resource.Quantity{}.OpenAPISchemaFormat(), }, @@ -46088,7 +46088,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Type: []string{"object"}, }, }, @@ -46099,7 +46099,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", + Description: "TypeMeta is shared by all top level objects. 
The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "apiVersion": { diff --git a/pkg/kubeapiserver/options/admission.go b/pkg/kubeapiserver/options/admission.go index f788c4c36c2..3d8c8ce2ad9 100644 --- a/pkg/kubeapiserver/options/admission.go +++ b/pkg/kubeapiserver/options/admission.go @@ -43,12 +43,13 @@ type AdmissionOptions struct { // NewAdmissionOptions creates a new instance of AdmissionOptions // Note: -// In addition it calls RegisterAllAdmissionPlugins to register -// all kube-apiserver admission plugins. // -// Provides the list of RecommendedPluginOrder that holds sane values -// that can be used by servers that don't care about admission chain. -// Servers that do care can overwrite/append that field after creation. +// In addition it calls RegisterAllAdmissionPlugins to register +// all kube-apiserver admission plugins. +// +// Provides the list of RecommendedPluginOrder that holds sane values +// that can be used by servers that don't care about admission chain. +// Servers that do care can overwrite/append that field after creation. func NewAdmissionOptions() *AdmissionOptions { options := genericoptions.NewAdmissionOptions() // register all admission plugins diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index ecf4d89c1eb..e15b759d3e1 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -861,7 +861,6 @@ func getContainer(pid int) (string, error) { // // The reason of leaving kernel threads at root cgroup is that we don't want to tie the // execution of these threads with to-be defined /system quota and create priority inversions. -// func ensureSystemCgroups(rootCgroupPath string, manager cgroups.Manager) error { // Move non-kernel PIDs to the system container. // Only keep errors on latest attempt. 
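The canonical-form rules spelled out in the Quantity description above can be observed directly through the apimachinery resource package; the expected outputs in the comments are the ones the description itself gives (1.5 serializes as "1500m", 1.5Gi as "1536Mi"). A minimal sketch:

package quantitysketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// printCanonicalForms shows canonical serialization as described above: no
// fractional digits are emitted and the largest possible suffix is kept.
func printCanonicalForms() {
	q := resource.MustParse("1.5")
	fmt.Println(q.String()) // "1500m"

	g := resource.MustParse("1.5Gi")
	fmt.Println(g.String()) // "1536Mi"
}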
diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index 01ddf57bb7b..7628769d5df 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -501,35 +501,35 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C // At a high-level this algorithm can be summarized as: // // For each NUMA single node: -// * If all requested CPUs can be allocated from this NUMA node; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in that NUMA node and return +// - If all requested CPUs can be allocated from this NUMA node; +// --> Do the allocation by running takeByTopologyNUMAPacked() over the +// available CPUs in that NUMA node and return // // Otherwise, for each pair of NUMA nodes: -// * If the set of requested CPUs (modulo 2) can be evenly split across -// the 2 NUMA nodes; AND -// * Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in both NUMA nodes and return +// - If the set of requested CPUs (modulo 2) can be evenly split across +// the 2 NUMA nodes; AND +// - Any remaining CPUs (after the modulo operation) can be striped across +// some subset of the NUMA nodes; +// --> Do the allocation by running takeByTopologyNUMAPacked() over the +// available CPUs in both NUMA nodes and return // // Otherwise, for each 3-tuple of NUMA nodes: -// * If the set of requested CPUs (modulo 3) can be evenly distributed -// across the 3 NUMA nodes; AND -// * Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in all three NUMA nodes and return +// - If the set of requested CPUs (modulo 3) can be evenly distributed +// across the 3 NUMA nodes; AND +// - Any remaining CPUs (after the modulo operation) can be striped across +// some subset of the NUMA nodes; +// --> Do the allocation by running takeByTopologyNUMAPacked() over the +// available CPUs in all three NUMA nodes and return // // ... 
// // Otherwise, for the set of all NUMA nodes: -// * If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly -// distributed across all NUMA nodes; AND -// * Any remaining CPUs (after the modulo operation) can be striped across -// some subset of the NUMA nodes; -// --> Do the allocation by running takeByTopologyNUMAPacked() over the -// available CPUs in all NUMA nodes and return +// - If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly +// distributed across all NUMA nodes; AND +// - Any remaining CPUs (after the modulo operation) can be striped across +// some subset of the NUMA nodes; +// --> Do the allocation by running takeByTopologyNUMAPacked() over the +// available CPUs in all NUMA nodes and return // // If none of the above conditions can be met, then resort back to a // best-effort fit of packing CPUs into NUMA nodes by calling diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index a872b389c46..c2aca10640f 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -65,24 +65,24 @@ func (e SMTAlignmentError) Type() string { // // The static policy maintains the following sets of logical CPUs: // -// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers -// run here. Initially this contains all CPU IDs on the system. As -// exclusive allocations are created and destroyed, this CPU set shrinks -// and grows, accordingly. This is stored in the state as the default -// CPU set. +// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers +// run here. Initially this contains all CPU IDs on the system. As +// exclusive allocations are created and destroyed, this CPU set shrinks +// and grows, accordingly. This is stored in the state as the default +// CPU set. // -// - RESERVED: A subset of the shared pool which is not exclusively -// allocatable. The membership of this pool is static for the lifetime of -// the Kubelet. The size of the reserved pool is -// ceil(systemreserved.cpu + kubereserved.cpu). -// Reserved CPUs are taken topologically starting with lowest-indexed -// physical core, as reported by cAdvisor. +// - RESERVED: A subset of the shared pool which is not exclusively +// allocatable. The membership of this pool is static for the lifetime of +// the Kubelet. The size of the reserved pool is +// ceil(systemreserved.cpu + kubereserved.cpu). +// Reserved CPUs are taken topologically starting with lowest-indexed +// physical core, as reported by cAdvisor. // -// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated -// from this pool. +// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated +// from this pool. // -// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container. -// These are stored as explicit assignments in the state. +// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container. +// These are stored as explicit assignments in the state. // // When an exclusive allocation is made, the static policy also updates the // default cpuset in the state abstraction. 
The CPU manager's periodic diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go index 4b35d3c4fe3..b6c0457bac5 100644 --- a/pkg/kubelet/cm/node_container_manager_linux.go +++ b/pkg/kubelet/cm/node_container_manager_linux.go @@ -40,7 +40,7 @@ const ( defaultNodeAllocatableCgroupName = "kubepods" ) -//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true +// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { nodeAllocatable := cm.internalCapacity // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable. diff --git a/pkg/kubelet/cm/topologymanager/fake_topology_manager.go b/pkg/kubelet/cm/topologymanager/fake_topology_manager.go index 8a60aa23347..998bf484608 100644 --- a/pkg/kubelet/cm/topologymanager/fake_topology_manager.go +++ b/pkg/kubelet/cm/topologymanager/fake_topology_manager.go @@ -27,7 +27,7 @@ type fakeManager struct { hint *TopologyHint } -//NewFakeManager returns an instance of FakeManager +// NewFakeManager returns an instance of FakeManager func NewFakeManager() Manager { klog.InfoS("NewFakeManager") return &fakeManager{} diff --git a/pkg/kubelet/cm/topologymanager/policy.go b/pkg/kubelet/cm/topologymanager/policy.go index 40255ed95cc..76b469fa1fc 100644 --- a/pkg/kubelet/cm/topologymanager/policy.go +++ b/pkg/kubelet/cm/topologymanager/policy.go @@ -293,18 +293,19 @@ func mergeFilteredHints(numaNodes []int, filteredHints [][]TopologyHint) Topolog // permutation as it is found. It is the equivalent of: // // for i := 0; i < len(providerHints[0]); i++ -// for j := 0; j < len(providerHints[1]); j++ -// for k := 0; k < len(providerHints[2]); k++ -// ... -// for z := 0; z < len(providerHints[-1]); z++ -// permutation := []TopologyHint{ -// providerHints[0][i], -// providerHints[1][j], -// providerHints[2][k], -// ... -// providerHints[-1][z] -// } -// callback(permutation) +// +// for j := 0; j < len(providerHints[1]); j++ +// for k := 0; k < len(providerHints[2]); k++ +// ... +// for z := 0; z < len(providerHints[-1]); z++ +// permutation := []TopologyHint{ +// providerHints[0][i], +// providerHints[1][j], +// providerHints[2][k], +// ... +// providerHints[-1][z] +// } +// callback(permutation) func iterateAllProviderTopologyHints(allProviderHints [][]TopologyHint, callback func([]TopologyHint)) { // Internal helper function to accumulate the permutation before calling the callback. 
var iterate func(i int, accum []TopologyHint) diff --git a/pkg/kubelet/cm/topologymanager/topology_manager.go b/pkg/kubelet/cm/topologymanager/topology_manager.go index 7cd67d1aa60..830703bd364 100644 --- a/pkg/kubelet/cm/topologymanager/topology_manager.go +++ b/pkg/kubelet/cm/topologymanager/topology_manager.go @@ -92,7 +92,7 @@ type HintProvider interface { Allocate(pod *v1.Pod, container *v1.Container) error } -//Store interface is to allow Hint Providers to retrieve pod affinity +// Store interface is to allow Hint Providers to retrieve pod affinity type Store interface { GetAffinity(podUID string, containerName string) TopologyHint } diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 954a6d3d2a2..4dea5ca65bd 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -414,11 +414,11 @@ func podsDifferSemantically(existing, ref *v1.Pod) bool { } // checkAndUpdatePod updates existing, and: -// * if ref makes a meaningful change, returns needUpdate=true -// * if ref makes a meaningful change, and this change is graceful deletion, returns needGracefulDelete=true -// * if ref makes no meaningful change, but changes the pod status, returns needReconcile=true -// * else return all false -// Now, needUpdate, needGracefulDelete and needReconcile should never be both true +// - if ref makes a meaningful change, returns needUpdate=true +// - if ref makes a meaningful change, and this change is graceful deletion, returns needGracefulDelete=true +// - if ref makes no meaningful change, but changes the pod status, returns needReconcile=true +// - else return all false +// Now, needUpdate, needGracefulDelete and needReconcile should never be both true func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) { // 1. this is a reconcile diff --git a/pkg/kubelet/configmap/configmap_manager.go b/pkg/kubelet/configmap/configmap_manager.go index de7ac5c06ca..0f276c083ec 100644 --- a/pkg/kubelet/configmap/configmap_manager.go +++ b/pkg/kubelet/configmap/configmap_manager.go @@ -114,11 +114,11 @@ const ( // NewCachingConfigMapManager creates a manager that keeps a cache of all configmaps // necessary for registered pods. // It implement the following logic: -// - whenever a pod is create or updated, the cached versions of all configmaps -// are invalidated -// - every GetObject() call tries to fetch the value from local cache; if it is -// not there, invalidated or too old, we fetch it from apiserver and refresh the -// value in cache; otherwise it is just fetched from cache +// - whenever a pod is create or updated, the cached versions of all configmaps +// are invalidated +// - every GetObject() call tries to fetch the value from local cache; if it is +// not there, invalidated or too old, we fetch it from apiserver and refresh the +// value in cache; otherwise it is just fetched from cache func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { return kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts) @@ -132,9 +132,9 @@ func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.G // NewWatchingConfigMapManager creates a manager that keeps a cache of all configmaps // necessary for registered pods. 
// It implements the following logic: -// - whenever a pod is created or updated, we start individual watches for all -// referenced objects that aren't referenced from other registered pods -// - every GetObject() returns a value from local cache propagated via watches +// - whenever a pod is created or updated, we start individual watches for all +// referenced objects that aren't referenced from other registered pods +// - every GetObject() returns a value from local cache propagated via watches func NewWatchingConfigMapManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager { listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts) diff --git a/pkg/kubelet/container/cache.go b/pkg/kubelet/container/cache.go index d4fec40d0c7..ef760826b20 100644 --- a/pkg/kubelet/container/cache.go +++ b/pkg/kubelet/container/cache.go @@ -108,8 +108,8 @@ func (c *cache) Delete(id types.UID) { delete(c.pods, id) } -// UpdateTime modifies the global timestamp of the cache and notify -// subscribers if needed. +// UpdateTime modifies the global timestamp of the cache and notify +// subscribers if needed. func (c *cache) UpdateTime(timestamp time.Time) { c.lock.Lock() defer c.lock.Unlock() diff --git a/pkg/kubelet/cri/streaming/server_test.go b/pkg/kubelet/cri/streaming/server_test.go index b438bc8b667..156c45d8626 100644 --- a/pkg/kubelet/cri/streaming/server_test.go +++ b/pkg/kubelet/cri/streaming/server_test.go @@ -304,7 +304,6 @@ func TestServePortForward(t *testing.T) { doClientStreams(t, "portforward", stream, stream, nil) } -// // Run the remote command test. // commandType is either "exec" or "attach". func runRemoteCommandTest(t *testing.T, commandType string) { diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index 68a0c6f3cd6..bf5d6fe7c1a 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -430,10 +430,9 @@ func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc { // Cmp compares p1 and p2 and returns: // -// -1 if p1 < p2 -// 0 if p1 == p2 -// +1 if p1 > p2 -// +// -1 if p1 < p2 +// 0 if p1 == p2 +// +1 if p1 > p2 type cmpFunc func(p1, p2 *v1.Pod) int // multiSorter implements the Sort interface, sorting changes within. 
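The cmpFunc arrangement above is the classic multi-key comparison pattern: each comparator returns -1, 0, or +1, and later comparators only break ties left by earlier ones. A generic sketch with a placeholder item type, not the eviction manager's actual multiSorter:

package evictionsketch

import "sort"

type item struct {
	priority int
	age      int
}

// cmp returns -1 if a sorts before b, +1 if after, and 0 to defer to the next
// comparator in the chain.
type cmp func(a, b item) int

// sortBy applies the comparators in order; the first non-zero result decides.
func sortBy(items []item, cmps ...cmp) {
	sort.SliceStable(items, func(i, j int) bool {
		for _, c := range cmps {
			if r := c(items[i], items[j]); r != 0 {
				return r < 0
			}
		}
		return false
	})
}

Used with, say, a priority comparator followed by an age comparator, items equal on priority fall through to the age comparison, which is the tie-breaking behaviour the doc comment describes.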
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 4caf8f73ce5..a5feec5e03a 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1477,29 +1477,32 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // Arguments: // // updateType - whether this is a create (first time) or an update, should -// only be used for metrics since this method must be reentrant +// +// only be used for metrics since this method must be reentrant +// // pod - the pod that is being set up // mirrorPod - the mirror pod known to the kubelet for this pod, if any // podStatus - the most recent pod status observed for this pod which can -// be used to determine the set of actions that should be taken during -// this loop of syncPod +// +// be used to determine the set of actions that should be taken during +// this loop of syncPod // // The workflow is: -// * If the pod is being created, record pod worker start latency -// * Call generateAPIPodStatus to prepare an v1.PodStatus for the pod -// * If the pod is being seen as running for the first time, record pod -// start latency -// * Update the status of the pod in the status manager -// * Stop the pod's containers if it should not be running due to soft -// admission -// * Ensure any background tracking for a runnable pod is started -// * Create a mirror pod if the pod is a static pod, and does not -// already have a mirror pod -// * Create the data directories for the pod if they do not exist -// * Wait for volumes to attach/mount -// * Fetch the pull secrets for the pod -// * Call the container runtime's SyncPod callback -// * Update the traffic shaping for the pod's ingress and egress limits +// - If the pod is being created, record pod worker start latency +// - Call generateAPIPodStatus to prepare an v1.PodStatus for the pod +// - If the pod is being seen as running for the first time, record pod +// start latency +// - Update the status of the pod in the status manager +// - Stop the pod's containers if it should not be running due to soft +// admission +// - Ensure any background tracking for a runnable pod is started +// - Create a mirror pod if the pod is a static pod, and does not +// already have a mirror pod +// - Create the data directories for the pod if they do not exist +// - Wait for volumes to attach/mount +// - Fetch the pull secrets for the pod +// - Call the container runtime's SyncPod callback +// - Update the traffic shaping for the pod's ingress and egress limits // // If any step of this workflow errors, the error is returned, and is repeated // on the next syncPod call. @@ -1893,8 +1896,8 @@ func (kl *Kubelet) syncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus } // Get pods which should be resynchronized. Currently, the following pod should be resynchronized: -// * pod whose work is ready. -// * internal modules that request sync of a pod. +// - pod whose work is ready. +// - internal modules that request sync of a pod. 
func (kl *Kubelet) getPodsToSync() []*v1.Pod { allPods := kl.podManager.GetPods() podUIDs := kl.workQueue.GetWork() @@ -2059,13 +2062,13 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand // With that in mind, in truly no particular order, the different channels // are handled as follows: // -// * configCh: dispatch the pods for the config change to the appropriate -// handler callback for the event type -// * plegCh: update the runtime cache; sync pod -// * syncCh: sync all pods waiting for sync -// * housekeepingCh: trigger cleanup of pods -// * health manager: sync pods that have failed or in which one or more -// containers have failed health checks +// - configCh: dispatch the pods for the config change to the appropriate +// handler callback for the event type +// - plegCh: update the runtime cache; sync pod +// - syncCh: sync all pods waiting for sync +// - housekeepingCh: trigger cleanup of pods +// - health manager: sync pods that have failed or in which one or more +// containers have failed health checks func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler, syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool { select { diff --git a/pkg/kubelet/kubelet_network_linux.go b/pkg/kubelet/kubelet_network_linux.go index ae7d9235a55..caeba316842 100644 --- a/pkg/kubelet/kubelet_network_linux.go +++ b/pkg/kubelet/kubelet_network_linux.go @@ -88,10 +88,10 @@ func (kl *Kubelet) initNetworkUtil() { // syncNetworkUtil ensures the network utility are present on host. // Network util includes: -// 1. In nat table, KUBE-MARK-DROP rule to mark connections for dropping -// Marked connection will be drop on INPUT/OUTPUT Chain in filter table -// 2. In nat table, KUBE-MARK-MASQ rule to mark connections for SNAT -// Marked connection will get SNAT on POSTROUTING Chain in nat table +// 1. In nat table, KUBE-MARK-DROP rule to mark connections for dropping +// Marked connection will be drop on INPUT/OUTPUT Chain in filter table +// 2. In nat table, KUBE-MARK-MASQ rule to mark connections for SNAT +// Marked connection will get SNAT on POSTROUTING Chain in nat table func (kl *Kubelet) syncNetworkUtil(iptClient utiliptables.Interface) bool { // Setup KUBE-MARK-DROP rules dropMark := getIPTablesMark(kl.iptablesDropBit) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index a72691bfcfc..6e676ffef82 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -268,10 +268,10 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSour // evictSandboxes remove all evictable sandboxes. An evictable sandbox must // meet the following requirements: -// 1. not in ready state -// 2. contains no containers. -// 3. belong to a non-existent (i.e., already removed) pod, or is not the -// most recently created sandbox for the pod. +// 1. not in ready state +// 2. contains no containers. +// 3. belong to a non-existent (i.e., already removed) pod, or is not the +// most recently created sandbox for the pod. 
func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { containers, err := cgc.manager.getKubeletContainers(true) if err != nil { diff --git a/pkg/kubelet/kuberuntime/logs/logs.go b/pkg/kubelet/kuberuntime/logs/logs.go index 7a3dc51875c..c69b557e37d 100644 --- a/pkg/kubelet/kuberuntime/logs/logs.go +++ b/pkg/kubelet/kuberuntime/logs/logs.go @@ -123,8 +123,9 @@ var parseFuncs = []parseFunc{ } // parseCRILog parses logs in CRI log format. CRI Log format example: -// 2016-10-06T00:17:09.669794202Z stdout P log content 1 -// 2016-10-06T00:17:09.669794203Z stderr F log content 2 +// +// 2016-10-06T00:17:09.669794202Z stdout P log content 1 +// 2016-10-06T00:17:09.669794203Z stderr F log content 2 func parseCRILog(log []byte, msg *logMessage) error { var err error // Parse timestamp @@ -182,8 +183,9 @@ type jsonLog struct { // parseDockerJSONLog parses logs in Docker JSON log format. Docker JSON log format // example: -// {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"} -// {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"} +// +// {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"} +// {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"} func parseDockerJSONLog(log []byte, msg *logMessage) error { var l = &jsonLog{} diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index 2700b2d526e..2e06c266f73 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -185,7 +185,8 @@ func rejectPodAdmissionBasedOnOSSelector(pod *v1.Pod, node *v1.Node) bool { // rejectPodAdmissionBasedOnOSField rejects pods if their OS field doesn't match runtime.GOOS. // TODO: Relax this restriction when we start supporting LCOW in kubernetes where podOS may not match -// node's OS. +// +// node's OS. func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool { if pod.Spec.OS == nil { return false diff --git a/pkg/kubelet/pluginmanager/cache/types.go b/pkg/kubelet/pluginmanager/cache/types.go index a0dbb3696a0..6b0a9a430f0 100644 --- a/pkg/kubelet/pluginmanager/cache/types.go +++ b/pkg/kubelet/pluginmanager/cache/types.go @@ -20,28 +20,30 @@ package cache // order to consume plugins // The PluginHandler follows the simple following state machine: // -// +--------------------------------------+ -// | ReRegistration | -// | Socket created with same plugin name | -// | | -// | | -// Socket Created v + Socket Deleted -// +------------------> Validate +---------------------------> Register +------------------> DeRegister -// + + + -// | | | -// | Error | Error | -// | | | -// v v v -// Out Out Out +// +--------------------------------------+ +// | ReRegistration | +// | Socket created with same plugin name | +// | | +// | | +// Socket Created v + Socket Deleted +// +------------------> Validate +---------------------------> Register +------------------> DeRegister +// + + + +// | | | +// | Error | Error | +// | | | +// v v v +// Out Out Out // // The pluginwatcher module follows strictly and sequentially this state machine for each *plugin name*. // e.g: If you are Registering a plugin foo, you cannot get a DeRegister call for plugin foo -// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...) -// call until the Register("foo") call returns. +// +// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...) +// call until the Register("foo") call returns. 
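For reference, a simplified stand-alone parser for the CRI log line format shown in the logs.go comment above ("<RFC3339Nano timestamp> <stream> <P|F tag> <content>"); the type and function names are illustrative, not the kubelet's parseCRILog, and real parsing handles more edge cases.

package main

import (
	"fmt"
	"strings"
	"time"
)

// criLogLine is a hypothetical holder for the fields of one CRI log line.
type criLogLine struct {
	timestamp time.Time
	stream    string // "stdout" or "stderr"
	partial   bool   // "P" means partial line, "F" means full line
	content   string
}

// parseLine splits a CRI log line into its four space-separated fields.
func parseLine(line string) (*criLogLine, error) {
	parts := strings.SplitN(line, " ", 4)
	if len(parts) != 4 {
		return nil, fmt.Errorf("unexpected CRI log format: %q", line)
	}
	ts, err := time.Parse(time.RFC3339Nano, parts[0])
	if err != nil {
		return nil, err
	}
	return &criLogLine{
		timestamp: ts,
		stream:    parts[1],
		partial:   parts[2] == "P",
		content:   parts[3],
	}, nil
}

func main() {
	l, err := parseLine("2016-10-06T00:17:09.669794202Z stdout P log content 1")
	if err != nil {
		panic(err)
	}
	fmt.Println(l.timestamp.UTC(), l.stream, l.partial, l.content)
}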
// // ReRegistration: Socket created with same plugin name, usually for a plugin update // e.g: plugin with name foo registers at foo.com/foo-1.9.7 later a plugin with name foo -// registers at foo.com/foo-1.9.9 +// +// registers at foo.com/foo-1.9.9 // // DeRegistration: When ReRegistration happens only the deletion of the new socket will trigger a DeRegister call type PluginHandler interface { diff --git a/pkg/kubelet/pluginmanager/reconciler/reconciler.go b/pkg/kubelet/pluginmanager/reconciler/reconciler.go index d8f104eb809..0c14c1c4cb7 100644 --- a/pkg/kubelet/pluginmanager/reconciler/reconciler.go +++ b/pkg/kubelet/pluginmanager/reconciler/reconciler.go @@ -48,12 +48,16 @@ type Reconciler interface { // NewReconciler returns a new instance of Reconciler. // // loopSleepDuration - the amount of time the reconciler loop sleeps between -// successive executions -// syncDuration - the amount of time the syncStates sleeps between -// successive executions +// +// successive executions +// syncDuration - the amount of time the syncStates sleeps between +// successive executions +// // operationExecutor - used to trigger register/unregister operations safely -// (prevents more than one operation from being triggered on the same -// socket path) +// +// (prevents more than one operation from being triggered on the same +// socket path) +// // desiredStateOfWorld - cache containing the desired state of the world // actualStateOfWorld - cache containing the actual state of the world func NewReconciler( diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index 5632745e060..9dad22b0b3e 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -313,10 +313,10 @@ func (s *podSyncStatus) IsDeleted() bool { return s.deleted } // Once a pod is set to be "torn down" it cannot be started again for that // UID (corresponding to a delete or eviction) until: // -// 1. The pod worker is finalized (syncTerminatingPod and -// syncTerminatedPod exit without error sequentially) -// 2. The SyncKnownPods method is invoked by kubelet housekeeping and the pod -// is not part of the known config. +// 1. The pod worker is finalized (syncTerminatingPod and +// syncTerminatedPod exit without error sequentially) +// 2. The SyncKnownPods method is invoked by kubelet housekeeping and the pod +// is not part of the known config. // // Pod workers provide a consistent source of information to other kubelet // loops about the status of the pod and whether containers can be @@ -332,36 +332,50 @@ func (s *podSyncStatus) IsDeleted() bool { return s.deleted } // ---| = kubelet config has synced at least once // -------| |- = pod exists in apiserver config // --------| |---------------- = CouldHaveRunningContainers() is true -// ^- pod is observed by pod worker . -// . . +// +// ^- pod is observed by pod worker . +// . . +// // ----------| |------------------------- = syncPod is running -// . ^- pod worker loop sees change and invokes syncPod -// . . . +// +// . ^- pod worker loop sees change and invokes syncPod +// . . . +// // --------------| |------- = ShouldPodContainersBeTerminating() returns true // --------------| |------- = IsPodTerminationRequested() returns true (pod is known) -// . . ^- Kubelet evicts pod . -// . . . +// +// . . ^- Kubelet evicts pod . +// . . . +// // -------------------| |---------------- = syncTerminatingPod runs then exits without error -// . . ^ pod worker loop exits syncPod, sees pod is terminating, -// . . invokes syncTerminatingPod -// . . . +// +// . . 
^ pod worker loop exits syncPod, sees pod is terminating, +// . . invokes syncTerminatingPod +// . . . +// // ---| |------------------| . = ShouldPodRuntimeBeRemoved() returns true (post-sync) -// . ^ syncTerminatingPod has exited successfully -// . . +// +// . ^ syncTerminatingPod has exited successfully +// . . +// // ----------------------------| |------- = syncTerminatedPod runs then exits without error -// . ^ other loops can tear down -// . . +// +// . ^ other loops can tear down +// . . +// // ------------------------------------| |---- = status manager is waiting for PodResourcesAreReclaimed() -// . ^ . +// +// . ^ . +// // ----------| |- = status manager can be writing pod status -// ^ status manager deletes pod because no longer exists in config +// +// ^ status manager deletes pod because no longer exists in config // // Other components in the Kubelet can request a termination of the pod // via the UpdatePod method or the killPodNow wrapper - this will ensure // the components of the pod are stopped until the kubelet is restarted // or permanently (if the phase of the pod is set to a terminal phase // in the pod status change). -// type podWorkers struct { // Protects all per worker fields. podLock sync.Mutex diff --git a/pkg/kubelet/reason_cache.go b/pkg/kubelet/reason_cache.go index 60327baf954..b487a1d9434 100644 --- a/pkg/kubelet/reason_cache.go +++ b/pkg/kubelet/reason_cache.go @@ -29,10 +29,11 @@ import ( // in a string, keyed by _. The goal is to // propagate this reason to the container status. This endeavor is // "best-effort" for two reasons: -// 1. The cache is not persisted. -// 2. We use an LRU cache to avoid extra garbage collection work. This -// means that some entries may be recycled before a pod has been -// deleted. +// 1. The cache is not persisted. +// 2. We use an LRU cache to avoid extra garbage collection work. This +// means that some entries may be recycled before a pod has been +// deleted. +// // TODO(random-liu): Use more reliable cache which could collect garbage of failed pod. // TODO(random-liu): Move reason cache to somewhere better. type ReasonCache struct { diff --git a/pkg/kubelet/runtimeclass/testing/fake_manager.go b/pkg/kubelet/runtimeclass/testing/fake_manager.go index 26ed1631be5..d5cb579fd16 100644 --- a/pkg/kubelet/runtimeclass/testing/fake_manager.go +++ b/pkg/kubelet/runtimeclass/testing/fake_manager.go @@ -45,7 +45,8 @@ func NewPopulatedClient() clientset.Interface { // StartManagerSync starts the manager, and waits for the informer cache to sync. // Returns a function to stop the manager, which should be called with a defer: -// defer StartManagerSync(t, m)() +// +// defer StartManagerSync(t, m)() func StartManagerSync(m *runtimeclass.Manager) func() { stopCh := make(chan struct{}) m.Start(stopCh) diff --git a/pkg/kubelet/secret/secret_manager.go b/pkg/kubelet/secret/secret_manager.go index 292c833555b..bba17fa8fd9 100644 --- a/pkg/kubelet/secret/secret_manager.go +++ b/pkg/kubelet/secret/secret_manager.go @@ -115,11 +115,11 @@ const ( // NewCachingSecretManager creates a manager that keeps a cache of all secrets // necessary for registered pods. 
// It implements the following logic: -// - whenever a pod is created or updated, the cached versions of all secrets -// are invalidated -// - every GetObject() call tries to fetch the value from local cache; if it is -// not there, invalidated or too old, we fetch it from apiserver and refresh the -// value in cache; otherwise it is just fetched from cache +// - whenever a pod is created or updated, the cached versions of all secrets +// are invalidated +// - every GetObject() call tries to fetch the value from local cache; if it is +// not there, invalidated or too old, we fetch it from apiserver and refresh the +// value in cache; otherwise it is just fetched from cache func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getSecret := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts) @@ -133,9 +133,9 @@ func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetO // NewWatchingSecretManager creates a manager that keeps a cache of all secrets // necessary for registered pods. // It implements the following logic: -// - whenever a pod is created or updated, we start individual watches for all -// referenced objects that aren't referenced from other registered pods -// - every GetObject() returns a value from local cache propagated via watches +// - whenever a pod is created or updated, we start individual watches for all +// referenced objects that aren't referenced from other registered pods +// - every GetObject() returns a value from local cache propagated via watches func NewWatchingSecretManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager { listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { return kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts) diff --git a/pkg/kubelet/server/auth.go b/pkg/kubelet/server/auth.go index 469039b9d2b..962e1ce9507 100644 --- a/pkg/kubelet/server/auth.go +++ b/pkg/kubelet/server/auth.go @@ -59,9 +59,10 @@ func isSubpath(subpath, path string) bool { // GetRequestAttributes populates authorizer attributes for the requests to the kubelet API. // Default attributes are: {apiVersion=v1,verb=,resource=nodes,name=,subresource=proxy} // More specific verb/resource is set for the following request patterns: -// /stats/* => verb=, resource=nodes, name=, subresource=stats -// /metrics/* => verb=, resource=nodes, name=, subresource=metrics -// /logs/* => verb=, resource=nodes, name=, subresource=log +// +// /stats/* => verb=, resource=nodes, name=, subresource=stats +// /metrics/* => verb=, resource=nodes, name=, subresource=metrics +// /logs/* => verb=, resource=nodes, name=, subresource=log func (n nodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *http.Request) authorizer.Attributes { apiVerb := "" diff --git a/pkg/kubelet/sysctl/util.go b/pkg/kubelet/sysctl/util.go index 09f4e75107a..0013fb7f688 100644 --- a/pkg/kubelet/sysctl/util.go +++ b/pkg/kubelet/sysctl/util.go @@ -26,8 +26,9 @@ import ( // The '/' separator is also accepted in place of a '.'. // Convert the sysctl variables to dots separator format for validation. 
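For reference, a minimal stand-alone sketch of the path-to-subresource mapping described in the auth.go comment above; subresourceForPath is a hypothetical helper, not the real nodeAuthorizerAttributesGetter, and the default "proxy" subresource mirrors the documented default attributes.

package main

import (
	"fmt"
	"strings"
)

// subresourceForPath maps a kubelet API path to the authorizer subresource:
// /stats/* -> stats, /metrics/* -> metrics, /logs/* -> log, everything else -> proxy.
func subresourceForPath(path string) string {
	switch {
	case path == "/stats" || strings.HasPrefix(path, "/stats/"):
		return "stats"
	case path == "/metrics" || strings.HasPrefix(path, "/metrics/"):
		return "metrics"
	case path == "/logs" || strings.HasPrefix(path, "/logs/"):
		return "log"
	default:
		return "proxy"
	}
}

func main() {
	for _, p := range []string{"/stats/summary", "/metrics/cadvisor", "/logs/kubelet.log", "/pods"} {
		fmt.Printf("%-20s => subresource=%s\n", p, subresourceForPath(p))
	}
}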
// More info: -// https://man7.org/linux/man-pages/man8/sysctl.8.html -// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +// +// https://man7.org/linux/man-pages/man8/sysctl.8.html +// https://man7.org/linux/man-pages/man5/sysctl.d.5.html func convertSysctlVariableToDotsSeparator(val string) string { if val == "" { return val diff --git a/pkg/kubelet/util/manager/cache_based_manager.go b/pkg/kubelet/util/manager/cache_based_manager.go index a255f364442..22531a28780 100644 --- a/pkg/kubelet/util/manager/cache_based_manager.go +++ b/pkg/kubelet/util/manager/cache_based_manager.go @@ -260,11 +260,11 @@ func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) { // NewCacheBasedManager creates a manager that keeps a cache of all objects // necessary for registered pods. // It implements the following logic: -// - whenever a pod is created or updated, the cached versions of all objects -// is referencing are invalidated -// - every GetObject() call tries to fetch the value from local cache; if it is -// not there, invalidated or too old, we fetch it from apiserver and refresh the -// value in cache; otherwise it is just fetched from cache +// - whenever a pod is created or updated, the cached versions of all objects +// is referencing are invalidated +// - every GetObject() call tries to fetch the value from local cache; if it is +// not there, invalidated or too old, we fetch it from apiserver and refresh the +// value in cache; otherwise it is just fetched from cache func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.String) Manager { return &cacheBasedManager{ objectStore: objectStore, diff --git a/pkg/kubelet/util/manager/watch_based_manager.go b/pkg/kubelet/util/manager/watch_based_manager.go index 785f01be256..e3a1d7e29d8 100644 --- a/pkg/kubelet/util/manager/watch_based_manager.go +++ b/pkg/kubelet/util/manager/watch_based_manager.go @@ -369,9 +369,9 @@ func (c *objectCache) shutdownWhenStopped(stopCh <-chan struct{}) { // NewWatchBasedManager creates a manager that keeps a cache of all objects // necessary for registered pods. 
// It implements the following logic: -// - whenever a pod is created or updated, we start individual watches for all -// referenced objects that aren't referenced from other registered pods -// - every GetObject() returns a value from local cache propagated via watches +// - whenever a pod is created or updated, we start individual watches for all +// referenced objects that aren't referenced from other registered pods +// - every GetObject() returns a value from local cache propagated via watches func NewWatchBasedManager( listObject listObjectFunc, watchObject watchObjectFunc, diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index f2cbe842eff..386a5bbac5d 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -79,9 +79,13 @@ type podStateProvider interface { // // kubeClient - used to fetch PV and PVC objects from the API server // loopSleepDuration - the amount of time the populator loop sleeps between -// successive executions +// +// successive executions +// // podManager - the kubelet podManager that is the source of truth for the pods -// that exist on this host +// +// that exist on this host +// // desiredStateOfWorld - the cache to populate func NewDesiredStateOfWorldPopulator( kubeClient clientset.Interface, diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 3c3b9b4d282..dd19defdbdf 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -75,21 +75,31 @@ type Reconciler interface { // NewReconciler returns a new instance of Reconciler. 
// // controllerAttachDetachEnabled - if true, indicates that the attach/detach -// controller is responsible for managing the attach/detach operations for -// this node, and therefore the volume manager should not +// +// controller is responsible for managing the attach/detach operations for +// this node, and therefore the volume manager should not +// // loopSleepDuration - the amount of time the reconciler loop sleeps between -// successive executions +// +// successive executions +// // waitForAttachTimeout - the amount of time the Mount function will wait for -// the volume to be attached +// +// the volume to be attached +// // nodeName - the Name for this node, used by Attach and Detach methods // desiredStateOfWorld - cache containing the desired state of the world // actualStateOfWorld - cache containing the actual state of the world // populatorHasAddedPods - checker for whether the populator has finished -// adding pods to the desiredStateOfWorld cache at least once after sources -// are all ready (before sources are ready, pods are probably missing) +// +// adding pods to the desiredStateOfWorld cache at least once after sources +// are all ready (before sources are ready, pods are probably missing) +// // operationExecutor - used to trigger attach/detach/mount/unmount operations -// safely (prevents more than one operation from being triggered on the same -// volume) +// +// safely (prevents more than one operation from being triggered on the same +// volume) +// // mounter - mounter passed in from kubelet, passed down unmount path // hostutil - hostutil passed in from kubelet // volumePluginMgr - volume plugin manager passed from kubelet diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index 6152080493a..03c2abd66bc 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -165,9 +165,12 @@ type podStateProvider interface { // VolumeManager interface. // // kubeClient - kubeClient is the kube API client used by DesiredStateOfWorldPopulator -// to communicate with the API server to fetch PV and PVC objects +// +// to communicate with the API server to fetch PV and PVC objects +// // volumePluginMgr - the volume plugin manager used to access volume plugins. -// Must be pre-initialized. +// +// Must be pre-initialized. 
func NewVolumeManager( controllerAttachDetachEnabled bool, nodeName k8stypes.NodeName, diff --git a/pkg/kubelet/winstats/version.go b/pkg/kubelet/winstats/version.go index f56327273cd..80e9442f8ce 100644 --- a/pkg/kubelet/winstats/version.go +++ b/pkg/kubelet/winstats/version.go @@ -24,7 +24,7 @@ import ( "golang.org/x/sys/windows/registry" ) -//OSInfo is a convenience class for retrieving Windows OS information +// OSInfo is a convenience class for retrieving Windows OS information type OSInfo struct { BuildNumber, ProductName string MajorVersion, MinorVersion, UBR uint64 @@ -75,12 +75,12 @@ func GetOSInfo() (*OSInfo, error) { }, nil } -//GetPatchVersion returns full OS version with patch +// GetPatchVersion returns full OS version with patch func (o *OSInfo) GetPatchVersion() string { return fmt.Sprintf("%d.%d.%s.%d", o.MajorVersion, o.MinorVersion, o.BuildNumber, o.UBR) } -//GetBuild returns OS version upto build number +// GetBuild returns OS version upto build number func (o *OSInfo) GetBuild() string { return fmt.Sprintf("%d.%d.%s", o.MajorVersion, o.MinorVersion, o.BuildNumber) } diff --git a/pkg/printers/tablegenerator.go b/pkg/printers/tablegenerator.go index 654deaa4b8b..735c0324673 100644 --- a/pkg/printers/tablegenerator.go +++ b/pkg/printers/tablegenerator.go @@ -146,7 +146,9 @@ func (h *HumanReadableGenerator) TableHandler(columnDefinitions []metav1.TableCo // ValidateRowPrintHandlerFunc validates print handler signature. // printFunc is the function that will be called to print an object. // It must be of the following type: -// func printFunc(object ObjectType, options GenerateOptions) ([]metav1.TableRow, error) +// +// func printFunc(object ObjectType, options GenerateOptions) ([]metav1.TableRow, error) +// // where ObjectType is the type of the object that will be printed, and the first // return value is an array of rows, with each row containing a number of cells that // match the number of columns defined for that printer function. diff --git a/pkg/probe/http/http.go b/pkg/probe/http/http.go index c44f235b169..890f14f9f7d 100644 --- a/pkg/probe/http/http.go +++ b/pkg/probe/http/http.go @@ -38,7 +38,8 @@ const ( // New creates Prober that will skip TLS verification while probing. // followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// If disabled, redirects to other hosts will trigger a warning result. +// +// If disabled, redirects to other hosts will trigger a warning result. func New(followNonLocalRedirects bool) Prober { tlsConfig := &tls.Config{InsecureSkipVerify: true} return NewWithTLSConfig(tlsConfig, followNonLocalRedirects) @@ -46,7 +47,8 @@ func New(followNonLocalRedirects bool) Prober { // NewWithTLSConfig takes tls config as parameter. // followNonLocalRedirects configures whether the prober should follow redirects to a different hostname. -// If disabled, redirects to other hosts will trigger a warning result. +// +// If disabled, redirects to other hosts will trigger a warning result. func NewWithTLSConfig(config *tls.Config, followNonLocalRedirects bool) Prober { // We do not want the probe use node's local proxy set. transport := utilnet.SetTransportDefaults( diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index ee8e778fe0e..1969fb4b21f 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -202,8 +202,10 @@ func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc // if items changed, otherwise return false. 
Update can be used to add/update/delete items of EndpointsChangeMap. For example, // Add item // - pass as the pair. +// // Update item // - pass as the pair. +// // Delete item // - pass as the pair. func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool { diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index f99378a3f24..de9348e5ec4 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -304,11 +304,10 @@ type realIPGetter struct { // NodeIPs returns all LOCAL type IP addresses from host which are // taken as the Node IPs of NodePort service. Filtered addresses: // -// * Loopback addresses -// * Addresses of the "other" family (not handled by this proxier instance) -// * Link-local IPv6 addresses -// * Addresses on the created dummy device `kube-ipvs0` -// +// - Loopback addresses +// - Addresses of the "other" family (not handled by this proxier instance) +// - Link-local IPv6 addresses +// - Addresses on the created dummy device `kube-ipvs0` func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) { nodeAddress, err := r.nl.GetAllLocalAddresses() diff --git a/pkg/proxy/service.go b/pkg/proxy/service.go index 17e746fbfac..2346565ed25 100644 --- a/pkg/proxy/service.go +++ b/pkg/proxy/service.go @@ -289,8 +289,10 @@ func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, ipFamily v1.IP // otherwise return false. Update can be used to add/update/delete items of ServiceChangeMap. For example, // Add item // - pass as the pair. +// // Update item // - pass as the pair. +// // Delete item // - pass as the pair. func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool { @@ -415,17 +417,18 @@ func (sm *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP set // tell if a service is deleted or updated. // The returned value is one of the arguments of ServiceMap.unmerge(). // ServiceMap A Merge ServiceMap B will do following 2 things: -// * update ServiceMap A. -// * produce a string set which stores all other ServiceMap's ServicePortName.String(). +// - update ServiceMap A. +// - produce a string set which stores all other ServiceMap's ServicePortName.String(). +// // For example, // - A{} // - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - produce string set {"ns/cluster-ip:http"} +// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// - produce string set {"ns/cluster-ip:http"} // - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}} // - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} -// - produce string set {"ns/cluster-ip:http"} +// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} +// - produce string set {"ns/cluster-ip:http"} func (sm *ServiceMap) merge(other ServiceMap) sets.String { // existingPorts is going to store all identifiers of all services in `other` ServiceMap. 
existingPorts := sets.NewString() diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index aada1db6b5b..74f85f136ee 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -297,10 +297,10 @@ func (info *endpointsInfo) GetZone() string { return "" } -//Uses mac prefix and IPv4 address to return a mac address -//This ensures mac addresses are unique for proper load balancing -//There is a possibility of MAC collisions but this Mac address is used for remote endpoints only -//and not sent on the wire. +// Uses mac prefix and IPv4 address to return a mac address +// This ensures mac addresses are unique for proper load balancing +// There is a possibility of MAC collisions but this Mac address is used for remote endpoints only +// and not sent on the wire. func conjureMac(macPrefix string, ip net.IP) string { if ip4 := ip.To4(); ip4 != nil { a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3] diff --git a/pkg/quota/v1/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go index cd299b7b157..f85fbde45d3 100644 --- a/pkg/quota/v1/evaluator/core/pods.go +++ b/pkg/quota/v1/evaluator/core/pods.go @@ -330,8 +330,8 @@ func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, obje // PodUsageFunc returns the quota usage for a pod. // A pod is charged for quota if the following are not true. -// - pod has a terminal phase (failed or succeeded) -// - pod has been marked for deletion and grace period has expired +// - pod has a terminal phase (failed or succeeded) +// - pod has been marked for deletion and grace period has expired func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, error) { pod, err := toExternalPodOrError(obj) if err != nil { diff --git a/pkg/quota/v1/evaluator/core/services.go b/pkg/quota/v1/evaluator/core/services.go index dc061337327..b681a853ac6 100644 --- a/pkg/quota/v1/evaluator/core/services.go +++ b/pkg/quota/v1/evaluator/core/services.go @@ -161,7 +161,7 @@ func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.Us var _ quota.Evaluator = &serviceEvaluator{} -//GetQuotaServiceType returns ServiceType if the service type is eligible to track against a quota, nor return "" +// GetQuotaServiceType returns ServiceType if the service type is eligible to track against a quota, nor return "" func GetQuotaServiceType(service *corev1.Service) corev1.ServiceType { switch service.Spec.Type { case corev1.ServiceTypeNodePort: diff --git a/pkg/registry/apiserverinternal/storageversion/strategy.go b/pkg/registry/apiserverinternal/storageversion/strategy.go index 9fcf9a8d8ca..93ab9813764 100644 --- a/pkg/registry/apiserverinternal/storageversion/strategy.go +++ b/pkg/registry/apiserverinternal/storageversion/strategy.go @@ -82,7 +82,7 @@ func (storageVersionStrategy) WarningsOnCreate(ctx context.Context, obj runtime. func (storageVersionStrategy) Canonicalize(obj runtime.Object) { } -// Does not allow creating a StorageVersion object with a PUT request. +// Does not allow creating a StorageVersion object with a PUT request. 
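For reference, a stand-alone sketch of deriving a per-endpoint MAC from an IPv4 address as the winkernel proxier comment above describes; the prefix, separator, and formatting here are assumptions for illustration, not the proxier's actual conjureMac output.

package main

import (
	"fmt"
	"net"
)

// conjureMACFromIPv4 appends the four IPv4 octets, rendered as hex, to a fixed
// MAC prefix so that each endpoint IP maps to a distinct, stable MAC address.
func conjureMACFromIPv4(macPrefix string, ip net.IP) (string, bool) {
	ip4 := ip.To4()
	if ip4 == nil {
		return "", false
	}
	return fmt.Sprintf("%s-%02x-%02x-%02x-%02x", macPrefix, ip4[0], ip4[1], ip4[2], ip4[3]), true
}

func main() {
	mac, ok := conjureMACFromIPv4("02-11", net.ParseIP("10.244.1.7"))
	fmt.Println(mac, ok) // 02-11-0a-f4-01-07 true
}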
func (storageVersionStrategy) AllowCreateOnUpdate() bool { return false } diff --git a/pkg/registry/apps/statefulset/strategy.go b/pkg/registry/apps/statefulset/strategy.go index 1a7d2386d7e..6d689bc5e14 100644 --- a/pkg/registry/apps/statefulset/strategy.go +++ b/pkg/registry/apps/statefulset/strategy.go @@ -112,9 +112,10 @@ func (statefulSetStrategy) PrepareForUpdate(ctx context.Context, obj, old runtim // dropStatefulSetDisabledFields drops fields that are not used if their associated feature gates // are not enabled. // The typical pattern is: -// if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) { -// newSvc.Spec.MyFeature = nil -// } +// +// if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) { +// newSvc.Spec.MyFeature = nil +// } func dropStatefulSetDisabledFields(newSS *apps.StatefulSet, oldSS *apps.StatefulSet) { if !utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) { if oldSS == nil || oldSS.Spec.PersistentVolumeClaimRetentionPolicy == nil { diff --git a/pkg/registry/core/service/ipallocator/allocator.go b/pkg/registry/core/service/ipallocator/allocator.go index 3aa6d9a9c44..4aa5bab688a 100644 --- a/pkg/registry/core/service/ipallocator/allocator.go +++ b/pkg/registry/core/service/ipallocator/allocator.go @@ -65,18 +65,18 @@ func (e *ErrNotInRange) Error() string { // // The internal structure of the range is: // -// For CIDR 10.0.0.0/24 -// 254 addresses usable out of 256 total (minus base and broadcast IPs) -// The number of usable addresses is r.max +// For CIDR 10.0.0.0/24 +// 254 addresses usable out of 256 total (minus base and broadcast IPs) +// The number of usable addresses is r.max // -// CIDR base IP CIDR broadcast IP -// 10.0.0.0 10.0.0.255 -// | | -// 0 1 2 3 4 5 ... ... 253 254 255 -// | | -// r.base r.base + r.max -// | | -// offset #0 of r.allocated last offset of r.allocated +// CIDR base IP CIDR broadcast IP +// 10.0.0.0 10.0.0.255 +// | | +// 0 1 2 3 4 5 ... ... 253 254 255 +// | | +// r.base r.base + r.max +// | | +// offset #0 of r.allocated last offset of r.allocated type Range struct { net *net.IPNet // base is a cached version of the start IP in the CIDR range as a *big.Int diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index e75d9f9a67a..fa870760490 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -213,8 +213,8 @@ func (c *Repair) doRunOnce() error { // collectServiceNodePorts returns nodePorts specified in the Service. // Please note that: -// 1. same nodePort with *same* protocol will be duplicated as it is -// 2. same nodePort with *different* protocol will be deduplicated +// 1. same nodePort with *same* protocol will be duplicated as it is +// 2. same nodePort with *different* protocol will be deduplicated func collectServiceNodePorts(service *corev1.Service) []int { var servicePorts []int // map from nodePort to set of protocols diff --git a/pkg/registry/core/service/portallocator/operation.go b/pkg/registry/core/service/portallocator/operation.go index 641370d0289..07ec744bd49 100644 --- a/pkg/registry/core/service/portallocator/operation.go +++ b/pkg/registry/core/service/portallocator/operation.go @@ -23,11 +23,13 @@ package portallocator // On rollback we best-effort release any allocations we did. 
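For reference, a purely illustrative calculation of the offset layout sketched in the allocator.go comment above, with the network and broadcast addresses excluded and offset 0 sitting at base+1; this is an assumption-laden sketch for IPv4 only, not the allocator's actual arithmetic.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// offsetInRange returns the bitmap offset of ip inside cidr under the layout above.
func offsetInRange(cidr string, ipStr string) (int, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return 0, err
	}
	ip := net.ParseIP(ipStr).To4()
	base := ipNet.IP.To4()
	if ip == nil || base == nil || !ipNet.Contains(ip) {
		return 0, fmt.Errorf("%s is not an IPv4 address inside %s", ipStr, cidr)
	}
	ones, bits := ipNet.Mask.Size()
	max := 1<<(bits-ones) - 2 // usable addresses: total minus network and broadcast
	offset := int(binary.BigEndian.Uint32(ip)-binary.BigEndian.Uint32(base)) - 1
	if offset < 0 || offset >= max {
		return 0, fmt.Errorf("%s is the network or broadcast address of %s", ipStr, cidr)
	}
	return offset, nil
}

func main() {
	off, err := offsetInRange("10.0.0.0/24", "10.0.0.5")
	fmt.Println(off, err) // 4 <nil>
}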
// // Pattern for use: -// op := StartPortAllocationOperation(...) -// defer op.Finish -// ... -// write(updatedOwner) -/// op.Commit() +// +// op := StartPortAllocationOperation(...) +// defer op.Finish +// ... +// write(updatedOwner) +// +// / op.Commit() type PortAllocationOperation struct { pa Interface allocated []int diff --git a/pkg/registry/core/service/strategy.go b/pkg/registry/core/service/strategy.go index c7598b68ae4..e76855795f8 100644 --- a/pkg/registry/core/service/strategy.go +++ b/pkg/registry/core/service/strategy.go @@ -113,9 +113,10 @@ func (svcStrategy) AllowUnconditionalUpdate() bool { // dropServiceDisabledFields drops fields that are not used if their associated feature gates // are not enabled. The typical pattern is: -// if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) { -// newSvc.Spec.MyFeature = nil -// } +// +// if !utilfeature.DefaultFeatureGate.Enabled(features.MyFeature) && !myFeatureInUse(oldSvc) { +// newSvc.Spec.MyFeature = nil +// } func dropServiceDisabledFields(newSvc *api.Service, oldSvc *api.Service) { if !utilfeature.DefaultFeatureGate.Enabled(features.MixedProtocolLBService) { diff --git a/pkg/registry/flowcontrol/ensurer/strategy.go b/pkg/registry/flowcontrol/ensurer/strategy.go index d118a20fbde..8e21ee6e9a3 100644 --- a/pkg/registry/flowcontrol/ensurer/strategy.go +++ b/pkg/registry/flowcontrol/ensurer/strategy.go @@ -37,13 +37,14 @@ const ( // ensureStrategy provides a strategy for ensuring apf bootstrap configurationWrapper. // We have two types of configurationWrapper objects: -// - mandatory: the mandatory configurationWrapper objects are about ensuring that the P&F -// system itself won't crash; we have to be sure there's 'catch-all' place for -// everything to go. Any changes made by the cluster operators to these -// configurationWrapper objects will be stomped by the apiserver. // -// - suggested: additional configurationWrapper objects for initial behavior. -// the cluster operators have an option to edit or delete these configurationWrapper objects. +// - mandatory: the mandatory configurationWrapper objects are about ensuring that the P&F +// system itself won't crash; we have to be sure there's 'catch-all' place for +// everything to go. Any changes made by the cluster operators to these +// configurationWrapper objects will be stomped by the apiserver. +// +// - suggested: additional configurationWrapper objects for initial behavior. +// the cluster operators have an option to edit or delete these configurationWrapper objects. type ensureStrategy interface { // Name of the strategy, for now we have two: 'mandatory' and 'suggested'. // This comes handy in logging. @@ -310,11 +311,14 @@ func removeAutoUpdateEnabledConfiguration(wrapper configurationWrapper, name str // getDanglingBootstrapObjectNames returns a list of names of bootstrap // configuration objects that are potentially candidates for deletion from // the cluster, given a set of bootstrap and current configuration. -// - bootstrap: a set of hard coded configuration kube-apiserver maintains in-memory. -// - current: a set of configuration objects that exist on the cluster +// - bootstrap: a set of hard coded configuration kube-apiserver maintains in-memory. +// - current: a set of configuration objects that exist on the cluster +// // Any object present in current is added to the list if both a and b are true: -// a. the object in current is missing from the bootstrap configuration -// b. 
the object has the designated auto-update annotation key +// +// a. the object in current is missing from the bootstrap configuration +// b. the object has the designated auto-update annotation key +// // This function shares the common logic for both FlowSchema and // PriorityLevelConfiguration type and hence it accepts metav1.Object only. func getDanglingBootstrapObjectNames(bootstrap sets.String, current []metav1.Object) []string { diff --git a/pkg/scheduler/framework/plugins/helper/shape_score.go b/pkg/scheduler/framework/plugins/helper/shape_score.go index dc34b71290c..f12488b14c8 100644 --- a/pkg/scheduler/framework/plugins/helper/shape_score.go +++ b/pkg/scheduler/framework/plugins/helper/shape_score.go @@ -32,8 +32,10 @@ type FunctionShapePoint struct { // Shape[i].Score represents function values at meeting points. // // function f(p) is defined as: -// shape[0].Score for p < shape[0].Utilization -// shape[n-1].Score for p > shape[n-1].Utilization +// +// shape[0].Score for p < shape[0].Utilization +// shape[n-1].Score for p > shape[n-1].Utilization +// // and linear between points (p < shape[i].Utilization) func BuildBrokenLinearFunction(shape FunctionShape) func(int64) int64 { return func(p int64) int64 { diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go index 778a70f2c7b..11e05e9753e 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go @@ -319,7 +319,7 @@ func satisfyExistingPodsAntiAffinity(state *preFilterState, nodeInfo *framework. return true } -// Checks if the node satisfies the incoming pod's anti-affinity rules. +// Checks if the node satisfies the incoming pod's anti-affinity rules. func satisfyPodAntiAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) bool { if len(state.antiAffinityCounts) > 0 { for _, term := range state.podInfo.RequiredAntiAffinityTerms { diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index e4b2a5044dd..7fea684e42d 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -135,25 +135,26 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr // the max in each dimension iteratively. In contrast, we sum the resource vectors for // regular containers since they run simultaneously. 
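For reference, a small stand-alone version of the broken-linear shape function described in shape_score.go above: constant before the first point, constant after the last, and linear between neighboring points; the point type here is an illustrative stand-in for FunctionShapePoint.

package main

import "fmt"

type point struct {
	utilization int64 // x coordinate
	score       int64 // f(x) at that point
}

// brokenLinear returns f(p) interpolated over the given shape points.
func brokenLinear(shape []point) func(int64) int64 {
	return func(p int64) int64 {
		for i := 0; i < len(shape); i++ {
			if p <= shape[i].utilization {
				if i == 0 {
					return shape[0].score
				}
				x0, y0 := shape[i-1].utilization, shape[i-1].score
				x1, y1 := shape[i].utilization, shape[i].score
				return y0 + (y1-y0)*(p-x0)/(x1-x0)
			}
		}
		return shape[len(shape)-1].score
	}
}

func main() {
	// Score 10 at 0% utilization, 0 at 100%; 50% lands halfway between.
	f := brokenLinear([]point{{0, 10}, {100, 0}})
	fmt.Println(f(0), f(50), f(100), f(120)) // 10 5 0 0
}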
// -// The resources defined for Overhead should be added to the calculated Resource request sum +// # The resources defined for Overhead should be added to the calculated Resource request sum // // Example: // // Pod: -// InitContainers -// IC1: -// CPU: 2 -// Memory: 1G -// IC2: -// CPU: 2 -// Memory: 3G -// Containers -// C1: -// CPU: 2 -// Memory: 1G -// C2: -// CPU: 1 -// Memory: 1G +// +// InitContainers +// IC1: +// CPU: 2 +// Memory: 1G +// IC2: +// CPU: 2 +// Memory: 3G +// Containers +// C1: +// CPU: 2 +// Memory: 1G +// C2: +// CPU: 1 +// Memory: 1G // // Result: CPU: 3, Memory: 3G func computePodResourceRequest(pod *v1.Pod) *preFilterState { diff --git a/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go b/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go index 1ee97a3eb24..d553a02e190 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go +++ b/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go @@ -77,9 +77,9 @@ func (e *errObjectName) Error() string { } // assumeCache stores two pointers to represent a single object: -// * The pointer to the informer object. -// * The pointer to the latest object, which could be the same as -// the informer object, or an in-memory object. +// - The pointer to the informer object. +// - The pointer to the latest object, which could be the same as +// the informer object, or an in-memory object. // // An informer update always overrides the latest object pointer. // diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go index b1a0bead666..1c116431b92 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -123,26 +123,26 @@ type InTreeToCSITranslator interface { // also considered along with the pod's other scheduling requirements. // // This integrates into the existing scheduler workflow as follows: -// 1. The scheduler takes a Pod off the scheduler queue and processes it serially: -// a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked -// here, pod volume information will be saved in current scheduling cycle state for later use. -// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here. -// c. Invokes all score plugins. Future/TBD -// d. Selects the best node for the Pod. -// e. Invokes all reserve plugins. AssumePodVolumes() is invoked here. -// i. If PVC binding is required, cache in-memory only: -// * For manual binding: update PV objects for prebinding to the corresponding PVCs. -// * For dynamic provisioning: update PVC object with a selected node from c) -// * For the pod, which PVCs and PVs need API updates. -// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache, -// This is handled in the scheduler and not here. -// f. Asynchronously bind volumes and pod in a separate goroutine -// i. BindPodVolumes() is called first in PreBind phase. It makes all the necessary API updates and waits for -// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent -// back through the scheduler. -// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding. -// 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue -// while the actual binding operation occurs in the background. +// 1. 
The scheduler takes a Pod off the scheduler queue and processes it serially: +// a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked +// here, pod volume information will be saved in current scheduling cycle state for later use. +// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here. +// c. Invokes all score plugins. Future/TBD +// d. Selects the best node for the Pod. +// e. Invokes all reserve plugins. AssumePodVolumes() is invoked here. +// i. If PVC binding is required, cache in-memory only: +// * For manual binding: update PV objects for prebinding to the corresponding PVCs. +// * For dynamic provisioning: update PVC object with a selected node from c) +// * For the pod, which PVCs and PVs need API updates. +// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache, +// This is handled in the scheduler and not here. +// f. Asynchronously bind volumes and pod in a separate goroutine +// i. BindPodVolumes() is called first in PreBind phase. It makes all the necessary API updates and waits for +// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent +// back through the scheduler. +// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding. +// 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue +// while the actual binding operation occurs in the background. type SchedulerVolumeBinder interface { // GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning) // and unbound with immediate binding (including prebound) diff --git a/pkg/scheduler/framework/preemption/preemption.go b/pkg/scheduler/framework/preemption/preemption.go index 673a007f9af..30bd8ac1aba 100644 --- a/pkg/scheduler/framework/preemption/preemption.go +++ b/pkg/scheduler/framework/preemption/preemption.go @@ -125,16 +125,20 @@ type Evaluator struct { // Preempt returns a PostFilterResult carrying suggested nominatedNodeName, along with a Status. // The semantics of returned varies on different scenarios: -// - . This denotes it's a transient/rare error that may be self-healed in future cycles. -// - . This status is mostly as expected like the preemptor is waiting for the -// victims to be fully terminated. -// - In both cases above, a nil PostFilterResult is returned to keep the pod's nominatedNodeName unchanged. // -// - . It indicates the pod cannot be scheduled even with preemption. -// In this case, a non-nil PostFilterResult is returned and result.NominatingMode instructs how to deal with -// the nominatedNodeName. -// - . It's the regular happy path -// and the non-empty nominatedNodeName will be applied to the preemptor pod. +// - . This denotes it's a transient/rare error that may be self-healed in future cycles. +// +// - . This status is mostly as expected like the preemptor is waiting for the +// victims to be fully terminated. +// +// - In both cases above, a nil PostFilterResult is returned to keep the pod's nominatedNodeName unchanged. +// +// - . It indicates the pod cannot be scheduled even with preemption. +// In this case, a non-nil PostFilterResult is returned and result.NominatingMode instructs how to deal with +// the nominatedNodeName. +// +// - . It's the regular happy path +// and the non-empty nominatedNodeName will be applied to the preemptor pod. 
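For reference, a stand-alone sketch of the aggregation rule documented for computePodResourceRequest a few hunks above: take the element-wise max over init containers, the sum over regular containers, then the max of those two results; the resources struct is an illustrative stand-in and pod overhead is omitted.

package main

import "fmt"

// resources is a simplified stand-in for a resource list (CPU in cores, memory in GiB).
type resources struct {
	cpu, memGiB int64
}

func maxRes(a, b resources) resources {
	if b.cpu > a.cpu {
		a.cpu = b.cpu
	}
	if b.memGiB > a.memGiB {
		a.memGiB = b.memGiB
	}
	return a
}

func addRes(a, b resources) resources {
	return resources{a.cpu + b.cpu, a.memGiB + b.memGiB}
}

// podRequest is the element-wise max of (max over init containers) and
// (sum over regular containers), mirroring the rule described above.
func podRequest(init, regular []resources) resources {
	var initMax, regSum resources
	for _, r := range init {
		initMax = maxRes(initMax, r)
	}
	for _, r := range regular {
		regSum = addRes(regSum, r)
	}
	return maxRes(initMax, regSum)
}

func main() {
	init := []resources{{2, 1}, {2, 3}}    // IC1, IC2
	regular := []resources{{2, 1}, {1, 1}} // C1, C2
	fmt.Printf("%+v\n", podRequest(init, regular)) // {cpu:3 memGiB:3}, matching the worked example
}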
func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) { // 0) Fetch the latest version of . // It's safe to directly fetch pod here. Because the informer cache has already been diff --git a/pkg/scheduler/internal/cache/interface.go b/pkg/scheduler/internal/cache/interface.go index 4cd6d8f8869..f6298bd346b 100644 --- a/pkg/scheduler/internal/cache/interface.go +++ b/pkg/scheduler/internal/cache/interface.go @@ -31,31 +31,31 @@ import ( // // State Machine of a pod's events in scheduler's cache: // +// +-------------------------------------------+ +----+ +// | Add | | | +// | | | | Update +// + Assume Add v v | // -// +-------------------------------------------+ +----+ -// | Add | | | -// | | | | Update -// + Assume Add v v | -//Initial +--------> Assumed +------------+---> Added <--+ -// ^ + + | + -// | | | | | -// | | | Add | | Remove -// | | | | | -// | | | + | -// +----------------+ +-----------> Expired +----> Deleted -// Forget Expire +// Initial +--------> Assumed +------------+---> Added <--+ // +// ^ + + | + +// | | | | | +// | | | Add | | Remove +// | | | | | +// | | | + | +// +----------------+ +-----------> Expired +----> Deleted +// Forget Expire // // Note that an assumed pod can expire, because if we haven't received Add event notifying us // for a while, there might be some problems and we shouldn't keep the pod in cache anymore. // // Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache. // Based on existing use cases, we are making the following assumptions: -// - No pod would be assumed twice -// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. -// - If a pod wasn't added, it wouldn't be removed or updated. -// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, -// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. +// - No pod would be assumed twice +// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. +// - If a pod wasn't added, it wouldn't be removed or updated. +// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, +// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. type Cache interface { // NodeCount returns the number of nodes in the cache. // DO NOT use outside of tests. diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index e706593b38a..8de1a09c03b 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -127,11 +127,11 @@ func NominatedNodeName(pod *v1.Pod) string { // The head of PriorityQueue is the highest priority pending pod. This structure // has two sub queues and a additional data structure, namely: activeQ, // backoffQ and unschedulablePods. -// - activeQ holds pods that are being considered for scheduling. -// - backoffQ holds pods that moved from unschedulablePods and will move to -// activeQ when their backoff periods complete. -// - unschedulablePods holds pods that were already attempted for scheduling and -// are currently determined to be unschedulable. +// - activeQ holds pods that are being considered for scheduling. 
+// - backoffQ holds pods that moved from unschedulablePods and will move to +// activeQ when their backoff periods complete. +// - unschedulablePods holds pods that were already attempted for scheduling and +// are currently determined to be unschedulable. type PriorityQueue struct { // PodNominator abstracts the operations to maintain nominated Pods. framework.PodNominator diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go index 1315557c4f3..03e88b341cb 100644 --- a/pkg/scheduler/schedule_one_test.go +++ b/pkg/scheduler/schedule_one_test.go @@ -2204,10 +2204,10 @@ func TestFindFitPredicateCallCounts(t *testing.T) { } // The point of this test is to show that you: -// - get the same priority for a zero-request pod as for a pod with the defaults requests, -// both when the zero-request pod is already on the node and when the zero-request pod -// is the one being scheduled. -// - don't get the same score no matter what we schedule. +// - get the same priority for a zero-request pod as for a pod with the defaults requests, +// both when the zero-request pod is already on the node and when the zero-request pod +// is the one being scheduled. +// - don't get the same score no matter what we schedule. func TestZeroRequest(t *testing.T) { // A pod with no resources. We expect spreading to count it as having the default resources. noResources := v1.PodSpec{ diff --git a/pkg/scheduler/testing/workload_prep.go b/pkg/scheduler/testing/workload_prep.go index d8e7b006833..b53be6b9db4 100644 --- a/pkg/scheduler/testing/workload_prep.go +++ b/pkg/scheduler/testing/workload_prep.go @@ -31,10 +31,10 @@ type keyVal struct { // It builds a fake cluster containing running Pods and Nodes. // The size of Pods and Nodes are determined by input arguments. // The specs of Pods and Nodes are generated with the following rules: -// - Each generated node is applied with a unique label: "node: node". -// - Each generated node is applied with a rotating label: "zone: zone[0-9]". -// - Depending on the input labels, each generated pod will be applied with -// label "key1", "key1,key2", ..., "key1,key2,...,keyN" in a rotating manner. +// - Each generated node is applied with a unique label: "node: node". +// - Each generated node is applied with a rotating label: "zone: zone[0-9]". +// - Depending on the input labels, each generated pod will be applied with +// label "key1", "key1,key2", ..., "key1,key2,...,keyN" in a rotating manner. func MakeNodesAndPodsForEvenPodsSpread(labels map[string]string, existingPodsNum, allNodesNum, filteredNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node, filteredNodes []*v1.Node) { var labelPairs []keyVal for k, v := range labels { diff --git a/pkg/serviceaccount/jwt_test.go b/pkg/serviceaccount/jwt_test.go index ded059f412e..79dde04acdf 100644 --- a/pkg/serviceaccount/jwt_test.go +++ b/pkg/serviceaccount/jwt_test.go @@ -60,9 +60,9 @@ WwIDAQAB // Obtained by: // -// 1. Serializing rsaPublicKey as DER -// 2. Taking the SHA256 of the DER bytes -// 3. URLSafe Base64-encoding the sha bytes +// 1. Serializing rsaPublicKey as DER +// 2. Taking the SHA256 of the DER bytes +// 3. URLSafe Base64-encoding the sha bytes const rsaKeyID = "JHJehTTTZlsspKHT-GaJxK7Kd1NQgZJu3fyK6K_QDYU" // Fake value for testing. @@ -111,9 +111,9 @@ X2i8uIp/C/ASqiIGUeeKQtX0/IR3qCXyThP/dbCiHrF3v1cuhBOHY8CLVg== // Obtained by: // -// 1. Serializing ecdsaPublicKey as DER -// 2. Taking the SHA256 of the DER bytes -// 3. URLSafe Base64-encoding the sha bytes +// 1. 
Serializing ecdsaPublicKey as DER +// 2. Taking the SHA256 of the DER bytes +// 3. URLSafe Base64-encoding the sha bytes const ecdsaKeyID = "SoABiieYuNx4UdqYvZRVeuC6SihxgLrhLy9peHMHpTc" func getPrivateKey(data string) interface{} { diff --git a/pkg/util/coverage/fake_test_deps.go b/pkg/util/coverage/fake_test_deps.go index a9506d4c77f..bb7ef3f1a23 100644 --- a/pkg/util/coverage/fake_test_deps.go +++ b/pkg/util/coverage/fake_test_deps.go @@ -23,6 +23,7 @@ import ( // This is an implementation of testing.testDeps. It doesn't need to do anything, because // no tests are actually run. It does need a concrete implementation of at least ImportPath, // which is called unconditionally when running tests. +// //nolint:unused // U1000 see comment above, we know it's unused normally. type fakeTestDeps struct{} diff --git a/pkg/util/ipset/ipset.go b/pkg/util/ipset/ipset.go index 2b145f08f8b..05fa90be5db 100644 --- a/pkg/util/ipset/ipset.go +++ b/pkg/util/ipset/ipset.go @@ -57,15 +57,15 @@ const IPSetCmd = "ipset" // EntryMemberPattern is the regular expression pattern of ipset member list. // The raw output of ipset command `ipset list {set}` is similar to, -//Name: foobar -//Type: hash:ip,port -//Revision: 2 -//Header: family inet hashsize 1024 maxelem 65536 -//Size in memory: 16592 -//References: 0 -//Members: -//192.168.1.2,tcp:8080 -//192.168.1.1,udp:53 +// Name: foobar +// Type: hash:ip,port +// Revision: 2 +// Header: family inet hashsize 1024 maxelem 65536 +// Size in memory: 16592 +// References: 0 +// Members: +// 192.168.1.2,tcp:8080 +// 192.168.1.1,udp:53 var EntryMemberPattern = "(?m)^(.*\n)*Members:\n" // VersionPattern is the regular expression pattern of ipset version string. @@ -121,7 +121,7 @@ func (set *IPSet) Validate() error { return nil } -//setIPSetDefaults sets some IPSet fields if not present to their default values. +// setIPSetDefaults sets some IPSet fields if not present to their default values. func (set *IPSet) setIPSetDefaults() { // Setting default values if not present if set.HashSize == 0 { diff --git a/pkg/util/ipvs/testing/fake.go b/pkg/util/ipvs/testing/fake.go index c674023941d..5e4ad8e3264 100644 --- a/pkg/util/ipvs/testing/fake.go +++ b/pkg/util/ipvs/testing/fake.go @@ -25,7 +25,7 @@ import ( utilipvs "k8s.io/kubernetes/pkg/util/ipvs" ) -//FakeIPVS no-op implementation of ipvs Interface +// FakeIPVS no-op implementation of ipvs Interface type FakeIPVS struct { Scheduler string Services map[ServiceKey]*utilipvs.VirtualServer @@ -53,7 +53,7 @@ func (r *RealServerKey) String() string { return net.JoinHostPort(r.Address.String(), strconv.Itoa(int(r.Port))) } -//NewFake creates a fake ipvs implementation - a cache store. +// NewFake creates a fake ipvs implementation - a cache store. func NewFake() *FakeIPVS { return &FakeIPVS{ Services: make(map[ServiceKey]*utilipvs.VirtualServer), @@ -76,7 +76,7 @@ func toRealServerKey(rs *utilipvs.RealServer) *RealServerKey { } } -//AddVirtualServer is a fake implementation, it simply adds the VirtualServer into the cache store. +// AddVirtualServer is a fake implementation, it simply adds the VirtualServer into the cache store. func (f *FakeIPVS) AddVirtualServer(serv *utilipvs.VirtualServer) error { if serv == nil { return fmt.Errorf("failed to add virtual server, error: virtual server can't be nil") @@ -88,7 +88,7 @@ func (f *FakeIPVS) AddVirtualServer(serv *utilipvs.VirtualServer) error { return nil } -//UpdateVirtualServer is a fake implementation, it updates the VirtualServer in the cache store. 
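The key IDs in the jwt_test.go hunks above are derived the same way for both keys: serialize the public key as DER, hash the DER bytes with SHA-256, then URL-safe base64-encode the digest. A minimal standalone sketch of that derivation, using a freshly generated key rather than the test fixtures and assuming the unpadded URL-safe alphabet (which is consistent with the fixture IDs shown):

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/sha256"
        "crypto/x509"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Illustrative key; the tests use fixed PEM fixtures instead.
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        // 1. Serialize the public key as DER.
        der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
        if err != nil {
            panic(err)
        }
        // 2. Take the SHA256 of the DER bytes.
        sum := sha256.Sum256(der)
        // 3. URL-safe base64-encode the digest (no padding).
        fmt.Println(base64.RawURLEncoding.EncodeToString(sum[:]))
    }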
+// UpdateVirtualServer is a fake implementation, it updates the VirtualServer in the cache store. func (f *FakeIPVS) UpdateVirtualServer(serv *utilipvs.VirtualServer) error { if serv == nil { return fmt.Errorf("failed to update service, service can't be nil") @@ -98,7 +98,7 @@ func (f *FakeIPVS) UpdateVirtualServer(serv *utilipvs.VirtualServer) error { return nil } -//DeleteVirtualServer is a fake implementation, it simply deletes the VirtualServer from the cache store. +// DeleteVirtualServer is a fake implementation, it simply deletes the VirtualServer from the cache store. func (f *FakeIPVS) DeleteVirtualServer(serv *utilipvs.VirtualServer) error { if serv == nil { return fmt.Errorf("failed to delete service: service can't be nil") @@ -110,7 +110,7 @@ func (f *FakeIPVS) DeleteVirtualServer(serv *utilipvs.VirtualServer) error { return nil } -//GetVirtualServer is a fake implementation, it tries to find a specific VirtualServer from the cache store. +// GetVirtualServer is a fake implementation, it tries to find a specific VirtualServer from the cache store. func (f *FakeIPVS) GetVirtualServer(serv *utilipvs.VirtualServer) (*utilipvs.VirtualServer, error) { if serv == nil { return nil, fmt.Errorf("failed to get service: service can't be nil") @@ -123,7 +123,7 @@ func (f *FakeIPVS) GetVirtualServer(serv *utilipvs.VirtualServer) (*utilipvs.Vir return nil, fmt.Errorf("not found serv: %v", key.String()) } -//GetVirtualServers is a fake implementation, it simply returns all VirtualServers in the cache store. +// GetVirtualServers is a fake implementation, it simply returns all VirtualServers in the cache store. func (f *FakeIPVS) GetVirtualServers() ([]*utilipvs.VirtualServer, error) { res := make([]*utilipvs.VirtualServer, 0) for _, svc := range f.Services { @@ -132,7 +132,7 @@ func (f *FakeIPVS) GetVirtualServers() ([]*utilipvs.VirtualServer, error) { return res, nil } -//Flush is a fake implementation, it simply clears the cache store. +// Flush is a fake implementation, it simply clears the cache store. func (f *FakeIPVS) Flush() error { // directly drop old data f.Services = nil @@ -140,7 +140,7 @@ func (f *FakeIPVS) Flush() error { return nil } -//AddRealServer is a fake implementation, it simply creates a RealServer for a VirtualServer in the cache store. +// AddRealServer is a fake implementation, it simply creates a RealServer for a VirtualServer in the cache store. func (f *FakeIPVS) AddRealServer(serv *utilipvs.VirtualServer, dest *utilipvs.RealServer) error { if serv == nil || dest == nil { return fmt.Errorf("failed to add destination for service, neither service nor destination shouldn't be nil") @@ -158,7 +158,7 @@ func (f *FakeIPVS) AddRealServer(serv *utilipvs.VirtualServer, dest *utilipvs.Re return nil } -//GetRealServers is a fake implementation, it simply returns all RealServers in the cache store. +// GetRealServers is a fake implementation, it simply returns all RealServers in the cache store. func (f *FakeIPVS) GetRealServers(serv *utilipvs.VirtualServer) ([]*utilipvs.RealServer, error) { if serv == nil { return nil, fmt.Errorf("failed to get destination for nil service") @@ -170,7 +170,7 @@ func (f *FakeIPVS) GetRealServers(serv *utilipvs.VirtualServer) ([]*utilipvs.Rea return f.Destinations[key], nil } -//DeleteRealServer is a fake implementation, it deletes the real server in the cache store. +// DeleteRealServer is a fake implementation, it deletes the real server in the cache store. 
+// DeleteRealServer is a fake implementation, it deletes the real server in the cache store.
func (f *FakeIPVS) DeleteRealServer(serv *utilipvs.VirtualServer, dest *utilipvs.RealServer) error { if serv == nil || dest == nil { return fmt.Errorf("failed to delete destination, neither service nor destination can't be nil") diff --git a/pkg/volume/awsebs/aws_ebs_block.go b/pkg/volume/awsebs/aws_ebs_block.go index c25808c4a93..d924aedcc2f 100644 --- a/pkg/volume/awsebs/aws_ebs_block.go +++ b/pkg/volume/awsebs/aws_ebs_block.go @@ -151,7 +151,8 @@ var _ volume.BlockVolumeMapper = &awsElasticBlockStoreMapper{} // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID -// plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX +// +// plugins/kubernetes.io/aws-ebs/volumeDevices/vol-XXXXXX func (ebs *awsElasticBlockStore) GetGlobalMapPath(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { diff --git a/pkg/volume/azuredd/azure_dd_block.go b/pkg/volume/azuredd/azure_dd_block.go index e6d6ce6ac11..713bb289b33 100644 --- a/pkg/volume/azuredd/azure_dd_block.go +++ b/pkg/volume/azuredd/azure_dd_block.go @@ -144,7 +144,8 @@ var _ volume.BlockVolumeMapper = &azureDataDiskMapper{} // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID -// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX +// +// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX func (disk *dataDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { diff --git a/pkg/volume/cinder/cinder_block.go b/pkg/volume/cinder/cinder_block.go index edb1891fdf9..618115770e3 100644 --- a/pkg/volume/cinder/cinder_block.go +++ b/pkg/volume/cinder/cinder_block.go @@ -155,7 +155,8 @@ var _ volume.BlockVolumeMapper = &cinderVolumeMapper{} // GetGlobalMapPath returns global map path and error // path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID -// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX +// +// plugins/kubernetes.io/cinder/volumeDevices/vol-XXXXXX func (cd *cinderVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) { pdName, _, _, err := getVolumeInfo(spec) if err != nil { diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 5621318a2e4..1e85796a216 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -41,7 +41,7 @@ import ( utilstrings "k8s.io/utils/strings" ) -//TODO (vladimirvivien) move this in a central loc later +// TODO (vladimirvivien) move this in a central loc later var ( volDataKey = struct { specVolID, diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 592aaaeaaf6..f922fec485a 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -548,7 +548,7 @@ func (p *csiPlugin) constructVolSourceSpec(volSpecName, driverName string) *volu return volume.NewSpecFromVolume(vol) } -//constructPVSourceSpec constructs volume.Spec with CSIPersistentVolumeSource +// constructPVSourceSpec constructs volume.Spec with CSIPersistentVolumeSource func (p *csiPlugin) constructPVSourceSpec(volSpecName, driverName, volumeHandle string) *volume.Spec { fsMode := api.PersistentVolumeFilesystem pv := &api.PersistentVolume{ diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index 622624829ee..3bb73f943a7 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -286,9 +286,11 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu // - 
Searches a file whose name is {pod uuid} under volume plugin directory. // - If a file is found, then retreives volumePluginDependentPath from globalMapPathUUID. // - Once volumePluginDependentPath is obtained, store volume information to VolumeSource +// // examples: -// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -// globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} +// +// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} +// globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName) blkutil := volumepathhandler.NewBlockVolumePathHandler() diff --git a/pkg/volume/flocker/flocker.go b/pkg/volume/flocker/flocker.go index 85442526fa1..dcf94a361ba 100644 --- a/pkg/volume/flocker/flocker.go +++ b/pkg/volume/flocker/flocker.go @@ -260,12 +260,12 @@ func (b *flockerVolumeMounter) newFlockerClient() (*flockerapi.Client, error) { SetUpAt will setup a Flocker volume following this flow of calls to the Flocker control service: -1. Get the dataset id for the given volume name/dir -2. It should already be there, if it's not the user needs to manually create it -3. Check the current Primary UUID -4. If it doesn't match with the Primary UUID that we got on 2, then we will - need to update the Primary UUID for this volume. -5. Wait until the Primary UUID was updated or timeout. + 1. Get the dataset id for the given volume name/dir + 2. It should already be there, if it's not the user needs to manually create it + 3. Check the current Primary UUID + 4. If it doesn't match with the Primary UUID that we got on 2, then we will + need to update the Primary UUID for this volume. + 5. Wait until the Primary UUID was updated or timeout. */ func (b *flockerVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error { var err error diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index e915db930cf..07bdc926719 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -415,7 +415,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } -//getVolumeInfo returns 'path' and 'readonly' field values from the provided glusterfs spec. +// getVolumeInfo returns 'path' and 'readonly' field values from the provided glusterfs spec. func getVolumeInfo(spec *volume.Spec) (string, bool, error) { if spec.Volume != nil && spec.Volume.Glusterfs != nil { return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil @@ -559,9 +559,9 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll } // Return the gid table for a storage class. -// - If this is the first time, fill it with all the gids -// used in PVs of this storage class by traversing the PVs. -// - Adapt the range of the table to the current range of the SC. +// - If this is the first time, fill it with all the gids +// used in PVs of this storage class by traversing the PVs. +// - Adapt the range of the table to the current range of the SC. 
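The block-volume hunks above (awsebs, azuredd, cinder, fc) all document the same two path layouts: a per-pod device map path and a plugin-global map path. A hypothetical sketch that composes both from placeholder values, only to make the layout concrete; every value below stands in for one of the {podUid}, {PluginName}, etc. tokens, and the escaped plugin-name form is illustrative:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        kubeletRoot := "/var/lib/kubelet"
        devicesDir := "volumeDevices" // DefaultKubeletVolumeDevicesDirName
        podUID := "1234-abcd"
        volumeName := "my-volume"

        // pod device map path:
        //   pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
        mapPath := filepath.Join(kubeletRoot, "pods", podUID, devicesDir,
            "kubernetes.io~aws-ebs", volumeName)

        // global map path:
        //   plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
        globalMapPath := filepath.Join(kubeletRoot, "plugins",
            "kubernetes.io/aws-ebs", devicesDir, "vol-XXXXXX")

        fmt.Println(mapPath)
        fmt.Println(globalMapPath)
    }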
func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) { plugin.gidTableLock.Lock() gidTable, ok := plugin.gidTable[className] diff --git a/pkg/volume/glusterfs/glusterfs_minmax.go b/pkg/volume/glusterfs/glusterfs_minmax.go index fc1f2887106..f99bdc71486 100644 --- a/pkg/volume/glusterfs/glusterfs_minmax.go +++ b/pkg/volume/glusterfs/glusterfs_minmax.go @@ -44,7 +44,7 @@ var ( ErrInternal = errors.New("internal error") ) -//MinMaxAllocator defines allocator struct. +// MinMaxAllocator defines allocator struct. type MinMaxAllocator struct { lock sync.Mutex min int @@ -79,7 +79,7 @@ func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) { }, nil } -//SetRange defines the range/pool with provided min and max values. +// SetRange defines the range/pool with provided min and max values. func (a *MinMaxAllocator) SetRange(min, max int) error { if min > max { return ErrInvalidRange @@ -108,7 +108,7 @@ func (a *MinMaxAllocator) SetRange(min, max int) error { return nil } -//Allocate allocates provided value in the allocator and mark it as used. +// Allocate allocates provided value in the allocator and mark it as used. func (a *MinMaxAllocator) Allocate(i int) (bool, error) { a.lock.Lock() defer a.lock.Unlock() @@ -127,7 +127,7 @@ func (a *MinMaxAllocator) Allocate(i int) (bool, error) { return true, nil } -//AllocateNext allocates next value from the allocator. +// AllocateNext allocates next value from the allocator. func (a *MinMaxAllocator) AllocateNext() (int, bool, error) { a.lock.Lock() defer a.lock.Unlock() @@ -150,7 +150,7 @@ func (a *MinMaxAllocator) AllocateNext() (int, bool, error) { return 0, false, ErrInternal } -//Release free/delete provided value from the allocator. +// Release free/delete provided value from the allocator. func (a *MinMaxAllocator) Release(i int) error { a.lock.Lock() defer a.lock.Unlock() @@ -173,7 +173,7 @@ func (a *MinMaxAllocator) has(i int) bool { return ok } -//Has check whether the provided value is used in the allocator +// Has check whether the provided value is used in the allocator func (a *MinMaxAllocator) Has(i int) bool { a.lock.Lock() defer a.lock.Unlock() @@ -181,7 +181,7 @@ func (a *MinMaxAllocator) Has(i int) bool { return a.has(i) } -//Free returns the number of free values in the allocator. +// Free returns the number of free values in the allocator. func (a *MinMaxAllocator) Free() int { a.lock.Lock() defer a.lock.Unlock() diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 445b828ae43..830bd5f717b 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) -// ProbeVolumePlugins is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&iscsiPlugin{}} } diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index c1944945fd8..bf8b996d08c 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -1043,11 +1043,11 @@ func (pm *VolumePluginMgr) Run(stopCh <-chan struct{}) { // plugin implementations. The following attributes can be overridden per // plugin via configuration: // -// 1. pod.Spec.Volumes[0].VolumeSource must be overridden. Recycler +// 1. pod.Spec.Volumes[0].VolumeSource must be overridden. Recycler // implementations without a valid VolumeSource will fail. -// 2. 
pod.GenerateName helps distinguish recycler pods by name. Recommended. +// 2. pod.GenerateName helps distinguish recycler pods by name. Recommended. // Default is "pv-recycler-". -// 3. pod.Spec.ActiveDeadlineSeconds gives the recycler pod a maximum timeout +// 3. pod.Spec.ActiveDeadlineSeconds gives the recycler pod a maximum timeout // before failing. Recommended. Default is 60 seconds. // // See HostPath and NFS for working recycler examples diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index 58c62e7f1fe..8f014c1940e 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -282,9 +282,10 @@ func createDriverClient(hostname string, port int32) (*osdclient.Client, error) } // getPortworxDriver returns a Portworx volume driver which can be used for cluster wide operations. -// Operations like create and delete volume don't need to be restricted to local volume host since -// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to -// the Portworx node that will own/owns the data. +// +// Operations like create and delete volume don't need to be restricted to local volume host since +// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to +// the Portworx node that will own/owns the data. func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { // check if existing saved client is valid if isValid, _ := isClientValid(util.portworxClient); isValid { @@ -319,10 +320,11 @@ func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) } // getLocalPortworxDriver returns driver connected to Portworx API server on volume host. -// This is required to force certain operations (mount, unmount, detach, attach) to -// go to the volume host instead of the k8s service which might route it to any host. This pertains to how -// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to -// see the pod container mounts (specifically /var/lib/kubelet/pods/) +// +// This is required to force certain operations (mount, unmount, detach, attach) to +// go to the volume host instead of the k8s service which might route it to any host. This pertains to how +// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to +// see the pod container mounts (specifically /var/lib/kubelet/pods/) func (util *portworxVolumeUtil) getLocalPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { if util.portworxClient != nil { // check if existing saved client is valid diff --git a/pkg/volume/rbd/attacher.go b/pkg/volume/rbd/attacher.go index 9766a2895c4..c931b14da67 100644 --- a/pkg/volume/rbd/attacher.go +++ b/pkg/volume/rbd/attacher.go @@ -199,11 +199,12 @@ var _ volume.DeviceUnmounter = &rbdDetacher{} // mount of the RBD image. This is called once all bind mounts have been // unmounted. // Internally, it does four things: -// - Unmount device from deviceMountPath. -// - Detach device from the node. -// - Remove lock if found. (No need to check volume readonly or not, because -// device is not on the node anymore, it's safe to remove lock.) -// - Remove the deviceMountPath at last. +// - Unmount device from deviceMountPath. +// - Detach device from the node. +// - Remove lock if found. 
(No need to check volume readonly or not, because +// device is not on the node anymore, it's safe to remove lock.) +// - Remove the deviceMountPath at last. +// // This method is idempotent, callers are responsible for retrying on failure. func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { if pathExists, pathErr := mount.PathExists(deviceMountPath); pathErr != nil { diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index ef2f12056df..4d5e3468203 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -165,7 +165,8 @@ func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin { // FakeVolumePlugin is useful for testing. It tries to be a fully compliant // plugin, but all it does is make empty directories. // Use as: -// volume.RegisterPlugin(&FakePlugin{"fake-name"}) +// +// volume.RegisterPlugin(&FakePlugin{"fake-name"}) type FakeVolumePlugin struct { sync.RWMutex PluginName string @@ -1177,7 +1178,7 @@ func (fc *FakeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies [] var _ volumepathhandler.BlockVolumePathHandler = &FakeVolumePathHandler{} -//NewDeviceHandler Create a new IoHandler implementation +// NewDeviceHandler Create a new IoHandler implementation func NewBlockVolumePathHandler() volumepathhandler.BlockVolumePathHandler { return &FakeVolumePathHandler{} } diff --git a/pkg/volume/util/atomic_writer.go b/pkg/volume/util/atomic_writer.go index 805647ab7ed..94428f6ffc8 100644 --- a/pkg/volume/util/atomic_writer.go +++ b/pkg/volume/util/atomic_writer.go @@ -42,9 +42,9 @@ const ( // // Note: // -// 1. AtomicWriter reserves the set of pathnames starting with `..`. -// 2. AtomicWriter offers no concurrency guarantees and must be synchronized -// by the caller. +// 1. AtomicWriter reserves the set of pathnames starting with `..`. +// 2. AtomicWriter offers no concurrency guarantees and must be synchronized +// by the caller. // // The visible files in this volume are symlinks to files in the writer's data // directory. Actual files are stored in a hidden timestamped directory which @@ -89,36 +89,40 @@ const ( // // The Write algorithm is: // -// 1. The payload is validated; if the payload is invalid, the function returns -// 2.  The current timestamped directory is detected by reading the data directory -// symlink -// 3. The old version of the volume is walked to determine whether any -// portion of the payload was deleted and is still present on disk. -// 4. The data in the current timestamped directory is compared to the projected -// data to determine if an update is required. -// 5.  A new timestamped dir is created -// 6. The payload is written to the new timestamped directory -// 7.  A symlink to the new timestamped directory ..data_tmp is created that will -// become the new data directory -// 8.  The new data directory symlink is renamed to the data directory; rename is atomic -// 9.  Symlinks and directory for new user-visible files are created (if needed). +// 1. The payload is validated; if the payload is invalid, the function returns +// 2.  The current timestamped directory is detected by reading the data directory +// symlink // -// For example, consider the files: -// /podName -// /user/labels -// /k8s/annotations +// 3. The old version of the volume is walked to determine whether any +// portion of the payload was deleted and is still present on disk. 
// -// The user visible files are symbolic links into the internal data directory: -// /podName -> ..data/podName -// /usr -> ..data/usr -// /k8s -> ..data/k8s +// 4. The data in the current timestamped directory is compared to the projected +// data to determine if an update is required. +// 5.  A new timestamped dir is created +// +// 6. The payload is written to the new timestamped directory +// 7.  A symlink to the new timestamped directory ..data_tmp is created that will +// become the new data directory +// 8.  The new data directory symlink is renamed to the data directory; rename is atomic +// 9.  Symlinks and directory for new user-visible files are created (if needed). +// +// For example, consider the files: +// /podName +// /user/labels +// /k8s/annotations +// +// The user visible files are symbolic links into the internal data directory: +// /podName -> ..data/podName +// /usr -> ..data/usr +// /k8s -> ..data/k8s +// +// The data directory itself is a link to a timestamped directory with +// the real data: +// /..data -> ..2016_02_01_15_04_05.12345678/ +// NOTE(claudiub): We need to create these symlinks AFTER we've finished creating and +// linking everything else. On Windows, if a target does not exist, the created symlink +// will not work properly if the target ends up being a directory. // -// The data directory itself is a link to a timestamped directory with -// the real data: -// /..data -> ..2016_02_01_15_04_05.12345678/ -// NOTE(claudiub): We need to create these symlinks AFTER we've finished creating and -// linking everything else. On Windows, if a target does not exist, the created symlink -// will not work properly if the target ends up being a directory. // 10. Old paths are removed from the user-visible portion of the target directory // 11.  The previous timestamped directory is removed, if it exists func (w *AtomicWriter) Write(payload map[string]FileProjection) error { diff --git a/pkg/volume/util/device_util.go b/pkg/volume/util/device_util.go index 1d0791ee05c..4b2fc88546b 100644 --- a/pkg/volume/util/device_util.go +++ b/pkg/volume/util/device_util.go @@ -16,7 +16,7 @@ limitations under the License. package util -//DeviceUtil is a util for common device methods +// DeviceUtil is a util for common device methods type DeviceUtil interface { FindMultipathDeviceForDevice(disk string) string FindSlaveDevicesOnMultipath(disk string) []string @@ -28,7 +28,7 @@ type deviceHandler struct { getIo IoUtil } -//NewDeviceHandler Create a new IoHandler implementation +// NewDeviceHandler Create a new IoHandler implementation func NewDeviceHandler(io IoUtil) DeviceUtil { return &deviceHandler{getIo: io} } diff --git a/pkg/volume/util/device_util_linux.go b/pkg/volume/util/device_util_linux.go index ae7078b23ab..30b4939916c 100644 --- a/pkg/volume/util/device_util_linux.go +++ b/pkg/volume/util/device_util_linux.go @@ -89,10 +89,11 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { // GetISCSIPortalHostMapForTarget given a target iqn, find all the scsi hosts logged into // that target. Returns a map of iSCSI portals (string) to SCSI host numbers (integers). 
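The core of the Write algorithm reformatted above is the ..data_tmp / ..data symlink swap in steps 5-9: the payload goes into a fresh timestamped directory, a temporary symlink is pointed at it, and a single rename makes it the live data directory. A minimal standalone sketch of just that swap, with invented file names and payload, assuming a Unix-like environment:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "time"
    )

    func main() {
        target, err := os.MkdirTemp("", "atomic-demo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(target)

        // Steps 5-6: write the payload into a new timestamped directory.
        tsDir := filepath.Join(target, "..demo_"+time.Now().Format("2006_01_02_15_04_05"))
        check(os.Mkdir(tsDir, 0755))
        check(os.WriteFile(filepath.Join(tsDir, "podName"), []byte("mypod"), 0644))

        // Step 7: ..data_tmp points at the new timestamped directory.
        check(os.Symlink(filepath.Base(tsDir), filepath.Join(target, "..data_tmp")))
        // Step 8: renaming ..data_tmp to ..data is the atomic switch.
        check(os.Rename(filepath.Join(target, "..data_tmp"), filepath.Join(target, "..data")))
        // Step 9: the user-visible file is a symlink through ..data.
        check(os.Symlink(filepath.Join("..data", "podName"), filepath.Join(target, "podName")))

        data, err := os.ReadFile(filepath.Join(target, "podName"))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // mypod
    }

    func check(err error) {
        if err != nil {
            panic(err)
        }
    }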
-// For example: { -// "192.168.30.7:3260": 2, -// "192.168.30.8:3260": 3, -// } +// +// For example: { +// "192.168.30.7:3260": 2, +// "192.168.30.8:3260": 3, +// } func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) { portalHostMap := make(map[string]int) io := handler.getIo diff --git a/pkg/volume/util/hostutil/hostutil_unsupported.go b/pkg/volume/util/hostutil/hostutil_unsupported.go index c54a7b23f91..0c25c352426 100644 --- a/pkg/volume/util/hostutil/hostutil_unsupported.go +++ b/pkg/volume/util/hostutil/hostutil_unsupported.go @@ -93,7 +93,7 @@ func (hu *HostUtil) GetSELinuxSupport(pathname string) (bool, error) { return false, errUnsupported } -//GetMode always returns an error on unsupported platforms +// GetMode always returns an error on unsupported platforms func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) { return 0, errUnsupported } diff --git a/pkg/volume/util/io_util.go b/pkg/volume/util/io_util.go index aff453e196c..8d65a6e48d2 100644 --- a/pkg/volume/util/io_util.go +++ b/pkg/volume/util/io_util.go @@ -32,7 +32,7 @@ type IoUtil interface { type osIOHandler struct{} -//NewIOHandler Create a new IoHandler implementation +// NewIOHandler Create a new IoHandler implementation func NewIOHandler() IoUtil { return &osIOHandler{} } diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go index 2afbc9a53ac..b7197dbdfe4 100644 --- a/pkg/volume/util/recyclerclient/recycler_client.go +++ b/pkg/volume/util/recyclerclient/recycler_client.go @@ -43,9 +43,9 @@ type RecycleEventRecorder func(eventtype, message string) // function deletes it as it is not able to judge if it is an old recycler // or user has forged a fake recycler to block Kubernetes from recycling.// // -// pod - the pod designed by a volume plugin to recycle the volume. pod.Name -// will be overwritten with unique name based on PV.Name. -// client - kube client for API operations. +// pod - the pod designed by a volume plugin to recycle the volume. pod.Name +// will be overwritten with unique name based on PV.Name. +// client - kube client for API operations. func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error { return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder)) } diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index f772bd5869f..807ea4b379f 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -631,7 +631,7 @@ func HasMountRefs(mountPath string, mountRefs []string) bool { return false } -//WriteVolumeCache flush disk data given the spcified mount path +// WriteVolumeCache flush disk data given the spcified mount path func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error { // If runtime os is windows, execute Write-VolumeCache powershell command on the disk if runtime.GOOS == "windows" { diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index b0031961a3b..aae1b39acb1 100644 --- a/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go +++ b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -152,7 +152,8 @@ func getLoopDeviceFromSysfs(path string) (string, error) { // corresponding to map path symlink, and then return global map path with pod uuid. 
// (See pkg/volume/volume.go for details on a global map path and a pod device map path.) // ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX -// globalMapPath/{pod uuid} bind mount: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +// +// globalMapPath/{pod uuid} bind mount: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { var globalMapPathUUID string // Find symbolic link named pod uuid under plugin dir @@ -211,7 +212,8 @@ func compareBindMountAndSymlinks(global, pod string) (bool, error) { // getDeviceMajorMinor returns major/minor number for the path with below format: // major:minor (in hex) // ex) -// fc:10 +// +// fc:10 func getDeviceMajorMinor(path string) (string, error) { var stat unix.Stat_t diff --git a/plugin/pkg/admission/imagepolicy/admission.go b/plugin/pkg/admission/imagepolicy/admission.go index 2cd7ddda22d..6fd7f0dfad7 100644 --- a/plugin/pkg/admission/imagepolicy/admission.go +++ b/plugin/pkg/admission/imagepolicy/admission.go @@ -210,15 +210,15 @@ func (a *Plugin) admitPod(ctx context.Context, pod *api.Pod, attributes admissio // The config file is specified by --admission-control-config-file and has the // following format for a webhook: // -// { -// "imagePolicy": { -// "kubeConfigFile": "path/to/kubeconfig/for/backend", -// "allowTTL": 30, # time in s to cache approval -// "denyTTL": 30, # time in s to cache denial -// "retryBackoff": 500, # time in ms to wait between retries -// "defaultAllow": true # determines behavior if the webhook backend fails -// } -// } +// { +// "imagePolicy": { +// "kubeConfigFile": "path/to/kubeconfig/for/backend", +// "allowTTL": 30, # time in s to cache approval +// "denyTTL": 30, # time in s to cache denial +// "retryBackoff": 500, # time in ms to wait between retries +// "defaultAllow": true # determines behavior if the webhook backend fails +// } +// } // // The config file may be json or yaml. // @@ -227,19 +227,19 @@ func (a *Plugin) admitPod(ctx context.Context, pod *api.Pod, attributes admissio // // The kubeconfig's cluster field is used to refer to the remote service, user refers to the returned authorizer. // -// # clusters refers to the remote service. -// clusters: -// - name: name-of-remote-imagepolicy-service -// cluster: -// certificate-authority: /path/to/ca.pem # CA for verifying the remote service. -// server: https://images.example.com/policy # URL of remote service to query. Must use 'https'. +// # clusters refers to the remote service. +// clusters: +// - name: name-of-remote-imagepolicy-service +// cluster: +// certificate-authority: /path/to/ca.pem # CA for verifying the remote service. +// server: https://images.example.com/policy # URL of remote service to query. Must use 'https'. // -// # users refers to the API server's webhook configuration. -// users: -// - name: name-of-api-server -// user: -// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use -// client-key: /path/to/key.pem # key matching the cert +// # users refers to the API server's webhook configuration. 
+// users: +// - name: name-of-api-server +// user: +// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use +// client-key: /path/to/key.pem # key matching the cert // // For additional HTTP configuration, refer to the kubeconfig documentation // http://kubernetes.io/v1.1/docs/user-guide/kubeconfig-file.html. diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index e578c333a5f..d2e4b14de7a 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -76,9 +76,10 @@ type pluginConfig struct { // If the file is not supplied, it defaults to "" // The format in a file: // podNodeSelectorPluginConfig: -// clusterDefaultNodeSelector: -// namespace1: -// namespace2: +// +// clusterDefaultNodeSelector: +// namespace1: +// namespace2: func readConfig(config io.Reader) *pluginConfig { defaultConfig := &pluginConfig{} if config == nil || reflect.ValueOf(config).IsNil() { diff --git a/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go b/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go index 4f6f04a2e43..f3f9a98c3a3 100644 --- a/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go +++ b/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go @@ -56,8 +56,7 @@ type TokenAuthenticator struct { // tokenErrorf prints a error message for a secret that has matched a bearer // token but fails to meet some other criteria. // -// tokenErrorf(secret, "has invalid value for key %s", key) -// +// tokenErrorf(secret, "has invalid value for key %s", key) func tokenErrorf(s *corev1.Secret, format string, i ...interface{}) { format = fmt.Sprintf("Bootstrap secret %s/%s matching bearer token ", s.Namespace, s.Name) + format klog.V(3).Infof(format, i...) @@ -69,26 +68,25 @@ func tokenErrorf(s *corev1.Secret, format string, i ...interface{}) { // // All secrets must be of type "bootstrap.kubernetes.io/token". An example secret: // -// apiVersion: v1 -// kind: Secret -// metadata: -// # Name MUST be of form "bootstrap-token-( token id )". -// name: bootstrap-token-( token id ) -// namespace: kube-system -// # Only secrets of this type will be evaluated. -// type: bootstrap.kubernetes.io/token -// data: -// token-secret: ( private part of token ) -// token-id: ( token id ) -// # Required key usage. -// usage-bootstrap-authentication: true -// auth-extra-groups: "system:bootstrappers:custom-group1,system:bootstrappers:custom-group2" -// # May also contain an expiry. +// apiVersion: v1 +// kind: Secret +// metadata: +// # Name MUST be of form "bootstrap-token-( token id )". +// name: bootstrap-token-( token id ) +// namespace: kube-system +// # Only secrets of this type will be evaluated. +// type: bootstrap.kubernetes.io/token +// data: +// token-secret: ( private part of token ) +// token-id: ( token id ) +// # Required key usage. +// usage-bootstrap-authentication: true +// auth-extra-groups: "system:bootstrappers:custom-group1,system:bootstrappers:custom-group2" +// # May also contain an expiry. 
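The secret example above carries the two halves of a bootstrap token (token-id and token-secret), and the authenticator below expects the bearer token in the form ( token-id ).( token-secret ). A small sketch of splitting and sanity-checking such a token; the exact character classes and lengths follow the usual bootstrap-token convention and should be read as illustrative rather than as this package's parser:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Illustrative pattern: "<token-id>.<token-secret>" with the
    // conventional 6- and 16-character lowercase alphanumeric parts.
    var bootstrapTokenRE = regexp.MustCompile(`^([a-z0-9]{6})\.([a-z0-9]{16})$`)

    func main() {
        token := "abcdef.0123456789abcdef"
        parts := bootstrapTokenRE.FindStringSubmatch(token)
        if parts == nil {
            fmt.Println("not a bootstrap token")
            return
        }
        fmt.Println("token-id:    ", parts[1])
        fmt.Println("token-secret:", parts[2])
    }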
// // Tokens are expected to be of the form: // -// ( token-id ).( token-secret ) -// +// ( token-id ).( token-secret ) func (t *TokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { tokenID, tokenSecret, err := bootstraptokenutil.ParseToken(token) if err != nil { diff --git a/plugin/pkg/auth/authorizer/node/graph.go b/plugin/pkg/auth/authorizer/node/graph.go index 840d1dd7bea..ef275df85cd 100644 --- a/plugin/pkg/auth/authorizer/node/graph.go +++ b/plugin/pkg/auth/authorizer/node/graph.go @@ -327,12 +327,12 @@ func (g *Graph) recomputeDestinationIndex_locked(n graph.Node) { // AddPod should only be called once spec.NodeName is populated. // It sets up edges for the following relationships (which are immutable for a pod once bound to a node): // -// pod -> node +// pod -> node // -// secret -> pod -// configmap -> pod -// pvc -> pod -// svcacct -> pod +// secret -> pod +// configmap -> pod +// pvc -> pod +// svcacct -> pod func (g *Graph) AddPod(pod *corev1.Pod) { start := time.Now() defer func() { @@ -407,9 +407,9 @@ func (g *Graph) DeletePod(name, namespace string) { // AddPV sets up edges for the following relationships: // -// secret -> pv +// secret -> pv // -// pv -> pvc +// pv -> pvc func (g *Graph) AddPV(pv *corev1.PersistentVolume) { start := time.Now() defer func() { @@ -448,7 +448,7 @@ func (g *Graph) DeletePV(name string) { // AddVolumeAttachment sets up edges for the following relationships: // -// volume attachment -> node +// volume attachment -> node func (g *Graph) AddVolumeAttachment(attachmentName, nodeName string) { start := time.Now() defer func() { diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer.go b/plugin/pkg/auth/authorizer/node/node_authorizer.go index 2203b549e3a..f3a5ab4339c 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -38,17 +38,17 @@ import ( ) // NodeAuthorizer authorizes requests from kubelets, with the following logic: -// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject -// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject -// 3. If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node: -// node <- configmap -// node <- pod -// node <- pod <- secret -// node <- pod <- configmap -// node <- pod <- pvc -// node <- pod <- pvc <- pv -// node <- pod <- pvc <- pv <- secret -// 4. For other resources, authorize all nodes uniformly using statically defined rules +// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject +// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject +// 3. If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node: +// node <- configmap +// node <- pod +// node <- pod <- secret +// node <- pod <- configmap +// node <- pod <- pvc +// node <- pod <- pvc <- pv +// node <- pod <- pvc <- pv <- secret +// 4. 
For other resources, authorize all nodes uniformly using statically defined rules type NodeAuthorizer struct { graph *Graph identifier nodeidentifier.NodeIdentifier diff --git a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index 37ac0d32670..63c45d54d71 100644 --- a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -44,7 +44,7 @@ message ServerStorageVersion { repeated string decodableVersions = 3; } -// Storage version of a specific resource. +// Storage version of a specific resource. message StorageVersion { // The name is .. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; diff --git a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types.go b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types.go index bfa249e135c..a0437b5074c 100644 --- a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types.go +++ b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -24,7 +24,7 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Storage version of a specific resource. +// Storage version of a specific resource. type StorageVersion struct { metav1.TypeMeta `json:",inline"` // The name is .. diff --git a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 297ed08a715..6de93420069 100644 --- a/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (ServerStorageVersion) SwaggerDoc() map[string]string { } var map_StorageVersion = map[string]string{ - "": "\n Storage version of a specific resource.", + "": "Storage version of a specific resource.", "metadata": "The name is ..", "spec": "Spec is an empty spec. It is here to comply with Kubernetes API style.", "status": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", diff --git a/staging/src/k8s.io/api/apps/v1/generated.proto b/staging/src/k8s.io/api/apps/v1/generated.proto index f728176f83e..5b27b92cf08 100644 --- a/staging/src/k8s.io/api/apps/v1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1/generated.proto @@ -569,8 +569,9 @@ message RollingUpdateStatefulSetStrategy { // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { diff --git a/staging/src/k8s.io/api/apps/v1/types.go b/staging/src/k8s.io/api/apps/v1/types.go index c6c87c93c6e..e541d63d79e 100644 --- a/staging/src/k8s.io/api/apps/v1/types.go +++ b/staging/src/k8s.io/api/apps/v1/types.go @@ -39,8 +39,9 @@ const ( // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. 
+// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. type StatefulSet struct { diff --git a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 448fb02da84..3cb5e4e760d 100644 --- a/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -291,7 +291,7 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", diff --git a/staging/src/k8s.io/api/apps/v1beta1/generated.proto b/staging/src/k8s.io/api/apps/v1beta1/generated.proto index c85fb73cfc1..5823613ed20 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/apps/v1beta1/generated.proto @@ -332,8 +332,9 @@ message ScaleStatus { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { diff --git a/staging/src/k8s.io/api/apps/v1beta1/types.go b/staging/src/k8s.io/api/apps/v1beta1/types.go index 6351f726d54..4632c63a79e 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/types.go +++ b/staging/src/k8s.io/api/apps/v1beta1/types.go @@ -88,8 +88,9 @@ type Scale struct { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. 
type StatefulSet struct { diff --git a/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index ff1b7b9c7ec..47f3414533d 100644 --- a/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -198,7 +198,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } diff --git a/staging/src/k8s.io/api/apps/v1beta2/generated.proto b/staging/src/k8s.io/api/apps/v1beta2/generated.proto index 6c92b5260b7..9103c6ba1c8 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/generated.proto +++ b/staging/src/k8s.io/api/apps/v1beta2/generated.proto @@ -619,8 +619,9 @@ message ScaleStatus { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. message StatefulSet { diff --git a/staging/src/k8s.io/api/apps/v1beta2/types.go b/staging/src/k8s.io/api/apps/v1beta2/types.go index 10f437d6a96..4e21bb267e1 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/types.go +++ b/staging/src/k8s.io/api/apps/v1beta2/types.go @@ -94,8 +94,9 @@ type Scale struct { // more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// // The StatefulSet guarantees that a given network identity will always // map to the same storage identity. 
type StatefulSet struct { diff --git a/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 3bc4af4c99f..29c746d0b1c 100644 --- a/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -322,7 +322,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } diff --git a/staging/src/k8s.io/api/certificates/v1/types.go b/staging/src/k8s.io/api/certificates/v1/types.go index 752c6697347..af5efb5165b 100644 --- a/staging/src/k8s.io/api/certificates/v1/types.go +++ b/staging/src/k8s.io/api/certificates/v1/types.go @@ -275,7 +275,9 @@ type CertificateSigningRequestList struct { // KeyUsage specifies valid usage contexts for keys. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// // +enum type KeyUsage string diff --git a/staging/src/k8s.io/api/certificates/v1beta1/types.go b/staging/src/k8s.io/api/certificates/v1beta1/types.go index 877312d97e3..fe7aab9704d 100644 --- a/staging/src/k8s.io/api/certificates/v1beta1/types.go +++ b/staging/src/k8s.io/api/certificates/v1beta1/types.go @@ -230,7 +230,8 @@ type CertificateSigningRequestList struct { // KeyUsages specifies valid usage contexts for keys. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 -// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +// +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string const ( diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 544055108f4..c27df040044 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1071,13 +1071,16 @@ message EndpointPort { // EndpointSubset is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. 
// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } +// +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// // The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. @@ -1096,17 +1099,18 @@ message EndpointSubset { } // Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] +// +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -2544,6 +2548,7 @@ message ObjectFieldSelector { // and the version of the actual struct is irrelevant. // 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type // will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3241,7 +3246,8 @@ message PodExecOptions { // IP address information for entries in the (plural) PodIPs field. // Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// +// IP: An IP address allocated to the pod. Routable at least within the cluster. message PodIP { // ip is an IP address (IPv4 or IPv6) assigned to the pod optional string ip = 1; diff --git a/staging/src/k8s.io/api/core/v1/toleration.go b/staging/src/k8s.io/api/core/v1/toleration.go index b203d335b6b..9341abf8919 100644 --- a/staging/src/k8s.io/api/core/v1/toleration.go +++ b/staging/src/k8s.io/api/core/v1/toleration.go @@ -29,11 +29,14 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: // (1) Empty toleration.effect means to match all taint effects, -// otherwise taint effect must equal to toleration.effect. +// +// otherwise taint effect must equal to toleration.effect. +// // (2) If toleration.operator is 'Exists', it means to match all taint values. // (3) Empty toleration.key means to match all taint keys. 
-// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index b4b58a17d4a..02360a2335c 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3646,7 +3646,8 @@ type PodDNSConfigOption struct { // IP address information for entries in the (plural) PodIPs field. // Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// +// IP: An IP address allocated to the pod. Routable at least within the cluster. type PodIP struct { // ip is an IP address (IPv4 or IPv6) assigned to the pod IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` @@ -4724,17 +4725,18 @@ type ServiceAccountList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] +// +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -4756,13 +4758,16 @@ type Endpoints struct { // EndpointSubset is a group of addresses with a common set of ports. The // expanded set of endpoints is the Cartesian product of Addresses x Ports. // For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } +// +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// // The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. @@ -5635,6 +5640,7 @@ type ServiceProxyOptions struct { // and the version of the actual struct is irrelevant. // 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type // will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// // Instead of using this type, create a locally provided and used type that is well-focused on your reference. // For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . 
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index bc9e2e039f0..513b28fca92 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -515,7 +515,7 @@ func (EndpointPort) SwaggerDoc() map[string]string { } var map_EndpointSubset = map[string]string{ - "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", "ports": "Port numbers available on the related IP addresses.", @@ -526,7 +526,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { } var map_Endpoints = map[string]string{ - "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. 
Sets of addresses and ports that comprise a service.", } @@ -1535,7 +1535,7 @@ func (PodExecOptions) SwaggerDoc() map[string]string { } var map_PodIP = map[string]string{ - "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.", "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", } diff --git a/staging/src/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/staging/src/k8s.io/api/flowcontrol/v1alpha1/generated.proto index 347c9d3fa01..455a0fcaf92 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -153,8 +153,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1alpha1/types.go b/staging/src/k8s.io/api/flowcontrol/v1alpha1/types.go index 5af677e2f53..c52c9aa7b9c 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/staging/src/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -415,8 +415,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index 1827be02d7e..aebb7f64cc1 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -111,7 +111,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. 
The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", } diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto b/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto index 474d520df74..74327452517 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -153,8 +153,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go b/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go index b4573264298..d6744f6f692 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go @@ -451,8 +451,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index b3752b6fb7d..9f8eacead06 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -111,7 +111,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", } diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto b/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto index 80bbaedea2f..ed57cda8ec6 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -153,8 +153,8 @@ message LimitResponse { // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go b/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go index 408681e998c..3ee00f29355 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go @@ -447,8 +447,8 @@ const ( // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // It addresses two issues: -// * How are requests for this priority level limited? -// * What should be done with requests that exceed the limit? +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 4775a8e993c..7efe477d26e 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -111,7 +111,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { } var map_LimitedPriorityLevelConfiguration = map[string]string{ - "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", } diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go index 6f3b0357466..47ebccfb802 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go index 5743937a957..6a18c23b83c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
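The AddToScheme composition documented in the register.go hunks above is what lets RawExtension fields that carry another clientset's types decode correctly. A minimal sketch, assuming the generated apiextensions clientset scheme package (path as shown in the hunks) is used to extend client-go's global scheme; error handling is elided only to keep the sketch short:

package main

import (
	apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
)

func init() {
	// Register apiextensions.k8s.io types into the client-go scheme so that
	// RawExtension payloads containing CustomResourceDefinitions round-trip.
	_ = apiextensionsscheme.AddToScheme(clientsetscheme.Scheme)
}

func main() {}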
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go index 998a29ba655..7757441436f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go @@ -92,9 +92,10 @@ func getBaseEnv() (*cel.Env, error) { // CompilationResult for each ValidationRule, or an error. declType is expected to be a CEL DeclType corresponding // to the structural schema. // Each CompilationResult may contain: -/// - non-nil Program, nil Error: The program was compiled successfully -// - nil Program, non-nil Error: Compilation resulted in an error -// - nil Program, nil Error: The provided rule was empty so compilation was not attempted +// / - non-nil Program, nil Error: The program was compiled successfully +// - nil Program, non-nil Error: Compilation resulted in an error +// - nil Program, nil Error: The provided rule was empty so compilation was not attempted +// // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit as input. func Compile(s *schema.Structural, declType *celmodel.DeclType, perCallLimit uint64) ([]CompilationResult, error) { t := time.Now() diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/lists.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/lists.go index 2960e16e90a..fe51dc87fdb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/lists.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/lists.go @@ -32,65 +32,61 @@ import ( // // Returns true if the provided list of comparable elements is sorted, else returns false. // -// >.isSorted() , T must be a comparable type +// >.isSorted() , T must be a comparable type // // Examples: // -// [1, 2, 3].isSorted() // return true -// ['a', 'b', 'b', 'c'].isSorted() // return true -// [2.0, 1.0].isSorted() // return false -// [1].isSorted() // return true -// [].isSorted() // return true -// +// [1, 2, 3].isSorted() // return true +// ['a', 'b', 'b', 'c'].isSorted() // return true +// [2.0, 1.0].isSorted() // return false +// [1].isSorted() // return true +// [].isSorted() // return true // // sum // // Returns the sum of the elements of the provided list. Supports CEL number (int, uint, double) and duration types. // -// >.sum() , T must be a numeric type or a duration +// >.sum() , T must be a numeric type or a duration // // Examples: // -// [1, 3].sum() // returns 4 -// [1.0, 3.0].sum() // returns 4.0 -// ['1m', '1s'].sum() // returns '1m1s' -// emptyIntList.sum() // returns 0 -// emptyDoubleList.sum() // returns 0.0 -// [].sum() // returns 0 -// +// [1, 3].sum() // returns 4 +// [1.0, 3.0].sum() // returns 4.0 +// ['1m', '1s'].sum() // returns '1m1s' +// emptyIntList.sum() // returns 0 +// emptyDoubleList.sum() // returns 0.0 +// [].sum() // returns 0 // // min / max // // Returns the minimum/maximum valued element of the provided list. Supports all comparable types. // If the list is empty, an error is returned. 
// -// >.min() , T must be a comparable type -// >.max() , T must be a comparable type +// >.min() , T must be a comparable type +// >.max() , T must be a comparable type // // Examples: // -// [1, 3].min() // returns 1 -// [1, 3].max() // returns 3 -// [].min() // error -// [1].min() // returns 1 -// ([0] + emptyList).min() // returns 0 -// +// [1, 3].min() // returns 1 +// [1, 3].max() // returns 3 +// [].min() // error +// [1].min() // returns 1 +// ([0] + emptyList).min() // returns 0 // // indexOf / lastIndexOf // // Returns either the first or last positional index of the provided element in the list. // If the element is not found, -1 is returned. Supports all equatable types. // -// >.indexOf() , T must be an equatable type -// >.lastIndexOf() , T must be an equatable type +// >.indexOf() , T must be an equatable type +// >.lastIndexOf() , T must be an equatable type // // Examples: // -// [1, 2, 2, 3].indexOf(2) // returns 1 -// ['a', 'b', 'b', 'c'].lastIndexOf('b') // returns 2 -// [1.0].indexOf(1.1) // returns -1 -// [].indexOf('string') // returns -1 -// +// [1, 2, 2, 3].indexOf(2) // returns 1 +// ['a', 'b', 'b', 'c'].lastIndexOf('b') // returns 2 +// [1.0].indexOf(1.1) // returns -1 +// [].indexOf('string') // returns -1 func Lists() cel.EnvOption { return cel.Lib(listsLib) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/regex.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/regex.go index cbb4faac82a..6db5ef19575 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/regex.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/regex.go @@ -32,18 +32,17 @@ import ( // Returns substrings that match the provided regular expression. find returns the first match. findAll may optionally // be provided a limit. If the limit is set and >= 0, no more than the limit number of matches are returned. // -// .find() -// .findAll() > -// .findAll(, ) > +// .find() +// .findAll() > +// .findAll(, ) > // // Examples: // -// "abc 123".find('[0-9]*') // returns '123' -// "abc 123".find('xyz') // returns '' -// "123 abc 456".findAll('[0-9]*') // returns ['123', '456'] -// "123 abc 456".findAll('[0-9]*', 1) // returns ['123'] -// "123 abc 456".findAll('xyz') // returns [] -// +// "abc 123".find('[0-9]*') // returns '123' +// "abc 123".find('xyz') // returns '' +// "123 abc 456".findAll('[0-9]*') // returns ['123', '456'] +// "123 abc 456".findAll('[0-9]*', 1) // returns ['123'] +// "123 abc 456".findAll('xyz') // returns [] func Regex() cel.EnvOption { return cel.Lib(regexLib) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/urls.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/urls.go index 7744931422c..d2c8483bed8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/urls.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library/urls.go @@ -32,41 +32,46 @@ import ( // Converts a string to a URL or results in an error if the string is not a valid URL. The URL must be an absolute URI // or an absolute path. 
// -// url() +// url() // // Examples: // -// url('https://user:pass@example.com:80/path?query=val#fragment') // returns a URL -// url('/absolute-path') // returns a URL -// url('https://a:b:c/') // error -// url('../relative-path') // error +// url('https://user:pass@example.com:80/path?query=val#fragment') // returns a URL +// url('/absolute-path') // returns a URL +// url('https://a:b:c/') // error +// url('../relative-path') // error // // isURL // // Returns true if a string is a valid URL. The URL must be an absolute URI or an absolute path. // -// isURL( ) +// isURL( ) // // Examples: // -// isURL('https://user:pass@example.com:80/path?query=val#fragment') // returns true -// isURL('/absolute-path') // returns true -// isURL('https://a:b:c/') // returns false -// isURL('../relative-path') // returns false -// +// isURL('https://user:pass@example.com:80/path?query=val#fragment') // returns true +// isURL('/absolute-path') // returns true +// isURL('https://a:b:c/') // returns false +// isURL('../relative-path') // returns false // // getScheme / getHost / getHostname / getPort / getEscapedPath / getQuery // // Return the parsed components of a URL. -// - getScheme: If absent in the URL, returns an empty string. -// - getHostname: IPv6 addresses are returned with braces, e.g. "[::1]". If absent in the URL, returns an empty string. -// - getHost: IPv6 addresses are returned without braces, e.g. "::1". If absent in the URL, returns an empty string. -// - getEscapedPath: The string returned by getEscapedPath is URL escaped, e.g. "with space" becomes "with%20space". -// If absent in the URL, returns an empty string. -// - getPort: If absent in the URL, returns an empty string. -// - getQuery: Returns the query parameters in "matrix" form where a repeated query key is interpreted to -// mean that there are multiple values for that key. The keys and values are returned unescaped. -// If absent in the URL, returns an empty map. +// +// - getScheme: If absent in the URL, returns an empty string. +// +// - getHostname: IPv6 addresses are returned with braces, e.g. "[::1]". If absent in the URL, returns an empty string. +// +// - getHost: IPv6 addresses are returned without braces, e.g. "::1". If absent in the URL, returns an empty string. +// +// - getEscapedPath: The string returned by getEscapedPath is URL escaped, e.g. "with space" becomes "with%20space". +// If absent in the URL, returns an empty string. +// +// - getPort: If absent in the URL, returns an empty string. +// +// - getQuery: Returns the query parameters in "matrix" form where a repeated query key is interpreted to +// mean that there are multiple values for that key. The keys and values are returned unescaped. +// If absent in the URL, returns an empty map. 
// // .getScheme() // .getHost() @@ -77,28 +82,27 @@ import ( // // Examples: // -// url('/path').getScheme() // returns '' -// url('https://example.com/').getScheme() // returns 'https' -// url('https://example.com:80/').getHost() // returns 'example.com:80' -// url('https://example.com/').getHost() // returns 'example.com' -// url('https://[::1]:80/').getHost() // returns '[::1]:80' -// url('https://[::1]/').getHost() // returns '[::1]' -// url('/path').getHost() // returns '' -// url('https://example.com:80/').getHostname() // returns 'example.com' -// url('https://127.0.0.1:80/').getHostname() // returns '127.0.0.1' -// url('https://[::1]:80/').getHostname() // returns '::1' -// url('/path').getHostname() // returns '' -// url('https://example.com:80/').getPort() // returns '80' -// url('https://example.com/').getPort() // returns '' -// url('/path').getPort() // returns '' -// url('https://example.com/path').getEscapedPath() // returns '/path' -// url('https://example.com/path with spaces/').getEscapedPath() // returns '/path%20with%20spaces/' -// url('https://example.com').getEscapedPath() // returns '' -// url('https://example.com/path?k1=a&k2=b&k2=c').getQuery() // returns { 'k1': ['a'], 'k2': ['b', 'c']} -// url('https://example.com/path?key with spaces=value with spaces').getQuery() // returns { 'key with spaces': ['value with spaces']} -// url('https://example.com/path?').getQuery() // returns {} -// url('https://example.com/path').getQuery() // returns {} -// +// url('/path').getScheme() // returns '' +// url('https://example.com/').getScheme() // returns 'https' +// url('https://example.com:80/').getHost() // returns 'example.com:80' +// url('https://example.com/').getHost() // returns 'example.com' +// url('https://[::1]:80/').getHost() // returns '[::1]:80' +// url('https://[::1]/').getHost() // returns '[::1]' +// url('/path').getHost() // returns '' +// url('https://example.com:80/').getHostname() // returns 'example.com' +// url('https://127.0.0.1:80/').getHostname() // returns '127.0.0.1' +// url('https://[::1]:80/').getHostname() // returns '::1' +// url('/path').getHostname() // returns '' +// url('https://example.com:80/').getPort() // returns '80' +// url('https://example.com/').getPort() // returns '' +// url('/path').getPort() // returns '' +// url('https://example.com/path').getEscapedPath() // returns '/path' +// url('https://example.com/path with spaces/').getEscapedPath() // returns '/path%20with%20spaces/' +// url('https://example.com').getEscapedPath() // returns '' +// url('https://example.com/path?k1=a&k2=b&k2=c').getQuery() // returns { 'k1': ['a'], 'k2': ['b', 'c']} +// url('https://example.com/path?key with spaces=value with spaces').getQuery() // returns { 'key with spaces': ['value with spaces']} +// url('https://example.com/path?').getQuery() // returns {} +// url('https://example.com/path').getQuery() // returns {} func URLs() cel.EnvOption { return cel.Lib(urlsLib) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go index cb75c2b9fd3..fba2b75af5f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go @@ -34,8 +34,8 @@ type AccessorFunc func(obj map[string]interface{}) (x interface{}, found bool, e // // With obj, acc, _ := 
someSurroundingObjectFunc(x) we get: // -// acc(obj) == x -// reflect.DeepEqual(acc(DeepCopy(obj), x) == x +// acc(obj) == x +// reflect.DeepEqual(acc(DeepCopy(obj), x) == x // // where x is the original instance for slices and maps. // @@ -44,19 +44,19 @@ type AccessorFunc func(obj map[string]interface{}) (x interface{}, found bool, e // // Example (ignoring the last two return values): // -// NewRootObjectFunc()(x) == x -// NewRootObjectFunc().Index()(x) == [x] -// NewRootObjectFunc().Index().Child("foo") == [{"foo": x}] -// NewRootObjectFunc().Index().Child("foo").Child("bar") == [{"foo": {"bar":x}}] -// NewRootObjectFunc().Index().Child("foo").Child("bar").Index() == [{"foo": {"bar":[x]}}] +// NewRootObjectFunc()(x) == x +// NewRootObjectFunc().Index()(x) == [x] +// NewRootObjectFunc().Index().Child("foo") == [{"foo": x}] +// NewRootObjectFunc().Index().Child("foo").Child("bar") == [{"foo": {"bar":x}}] +// NewRootObjectFunc().Index().Child("foo").Child("bar").Index() == [{"foo": {"bar":[x]}}] // // and: // -// NewRootObjectFunc(), then acc(x) == x -// NewRootObjectFunc().Index(), then acc([x]) == x -// NewRootObjectFunc().Index().Child("foo"), then acc([{"foo": x}]) == x -// NewRootObjectFunc().Index().Child("foo").Child("bar"), then acc([{"foo": {"bar":x}}]) == x -// NewRootObjectFunc().Index().Child("foo").Child("bar").Index(), then acc([{"foo": {"bar":[x]}}]) == x +// NewRootObjectFunc(), then acc(x) == x +// NewRootObjectFunc().Index(), then acc([x]) == x +// NewRootObjectFunc().Index().Child("foo"), then acc([{"foo": x}]) == x +// NewRootObjectFunc().Index().Child("foo").Child("bar"), then acc([{"foo": {"bar":x}}]) == x +// NewRootObjectFunc().Index().Child("foo").Child("bar").Index(), then acc([{"foo": {"bar":[x]}}]) == x type SurroundingObjectFunc func(focus interface{}) (map[string]interface{}, AccessorFunc, error) // NewRootObjectFunc returns the identity function. The passed focus value diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go index e2966341a7b..f7f29e70c4b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -48,14 +48,14 @@ const ( // * RawExtension: for every schema with `x-kubernetes-embedded-resource: true`, `x-kubernetes-preserve-unknown-fields: true` and `type: object` are set // * IntOrString: for `x-kubernetes-int-or-string: true` either `type` is empty under `anyOf` and `allOf` or the schema structure is one of these: // -// 1) anyOf: -// - type: integer -// - type: string -// 2) allOf: -// - anyOf: -// - type: integer -// - type: string -// - ... zero or more +// 1. anyOf: +// - type: integer +// - type: string +// 2. allOf: +// - anyOf: +// - type: integer +// - type: string +// - ... zero or more // // * every specified field or array in s is also specified outside of value validation. // * metadata at the root can only restrict the name and generateName, and not be specified at all in nested contexts. 
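The CEL helper libraries whose documentation is reformatted above (lists.go, regex.go, urls.go) are normally consumed through CRD validation rules, but each exports a plain cel-go environment option, so a standalone sketch can exercise them directly. The wiring below (cel.NewEnv/Compile/Program/Eval against the library package path shown in the hunks) is illustrative, not how the structural-schema compiler assembles its environment:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library"
)

func main() {
	// Build a CEL environment with the Kubernetes extension libraries.
	env, err := cel.NewEnv(library.Lists(), library.Regex(), library.URLs())
	if err != nil {
		panic(err)
	}
	ast, issues := env.Compile(`[1, 2, 3].isSorted() && url('https://example.com/').getScheme() == 'https'`)
	if issues != nil && issues.Err() != nil {
		panic(issues.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}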
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go index 9aba8e85e16..7f9da840e18 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go index 144c20666d2..25fca99178b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/fake/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/fake/register.go index 48d558cc93b..16dccfca5ab 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/fake/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/scheme/register.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/scheme/register.go index 144c20666d2..25fca99178b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/scheme/register.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/deprecated/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go index 0f1dc2d0643..af72d9ce7ed 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go @@ -69,8 +69,9 @@ func NewDefaultTestServerOptions() *TestServerInstanceOptions { // and location of the tmpdir are returned. // // Note: we return a tear-down func instead of a stop channel because the later will leak temporary -// files that because Golang testing's call to os.Exit will not give a stop channel go routine -// enough time to remove temporary files. +// +// files that because Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. 
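The tear-down note above matters in practice: callers should defer the returned tear-down rather than relying on a stop channel that os.Exit may never drain. A hedged sketch of typical usage; the TearDownFn and ClientConfig field names are assumptions borrowed from the analogous kube-apiserver test server, and the storage config is left to the caller:

package integration

import (
	"testing"

	apiextensionstesting "k8s.io/apiextensions-apiserver/pkg/cmd/server/testing"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

func startAndCleanUp(t *testing.T, storageConfig *storagebackend.Config) {
	server, err := apiextensionstesting.StartTestServer(t, nil, nil, storageConfig)
	if err != nil {
		t.Fatal(err)
	}
	// Tear down via the returned function (field name assumed); a stop channel
	// would race with os.Exit and leak the temporary directory, per the comment above.
	defer server.TearDownFn()

	_ = server.ClientConfig // use the client config to exercise the server here
}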
func StartTestServer(t Logger, _ *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) { stopCh := make(chan struct{}) var errCh chan error diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go index 48f402de17f..531e6c317c4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go @@ -248,12 +248,13 @@ type builder struct { } // subresource is a handy method to get subresource name. Valid inputs are: -// input output -// "" "" -// "/" "" -// "/{name}" "" -// "/{name}/scale" "scale" -// "/{name}/scale/foo" invalid input +// +// input output +// "" "" +// "/" "" +// "/{name}" "" +// "/{name}/scale" "scale" +// "/{name}/scale/foo" invalid input func subresource(path string) string { parts := strings.Split(path, "/") if len(parts) <= 2 { @@ -303,9 +304,10 @@ func (b *builder) descriptionFor(path, operationVerb string) string { } // buildRoute returns a RouteBuilder for WebService to consume and builds path in swagger -// action can be one of: GET, PUT, PATCH, POST, DELETE; -// verb can be one of: list, read, replace, patch, create, delete, deletecollection; -// sample is the sample Go type for response type. +// +// action can be one of: GET, PUT, PATCH, POST, DELETE; +// verb can be one of: list, read, replace, patch, create, delete, deletecollection; +// sample is the sample Go type for response type. func (b *builder) buildRoute(root, path, httpMethod, actionVerb, operationVerb string, sample interface{}) *restful.RouteBuilder { var namespaced string if b.namespaced { @@ -489,7 +491,8 @@ func generateBuildDefinitionsFunc() { } // addTypeMetaProperties adds Kubernetes-specific type meta properties to input schema: -// apiVersion and kind +// +// apiVersion and kind func addTypeMetaProperties(s *spec.Schema, v2 bool) { s.SetProperty("apiVersion", getDefinition(typeMetaType, v2).SchemaProps.Properties["apiVersion"]) s.SetProperty("kind", getDefinition(typeMetaType, v2).SchemaProps.Properties["kind"]) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go index 8ceea79b0dc..241dd297389 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go @@ -3304,7 +3304,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Type: []string{"object"}, }, }, @@ -3315,7 +3315,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", + Description: "TypeMeta is shared by all top level objects. 
The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "apiVersion": { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/webhook.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/webhook.go index 0f2faeff550..b283b9a0cdb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/webhook.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/webhook.go @@ -209,7 +209,8 @@ func NewObjectConverterWebhookHandler(t *testing.T, converterFunc ObjectConverte } // localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw diff --git a/staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip/roundtrip.go b/staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip/roundtrip.go index 2502c98b722..61868c82a68 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip/roundtrip.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip/roundtrip.go @@ -107,16 +107,15 @@ var globalNonRoundTrippableTypes = sets.NewString( // GlobalNonRoundTrippableTypes returns the kinds that are effectively reserved across all GroupVersions. // They don't roundtrip and thus can be excluded in any custom/downstream roundtrip tests // -// kinds := scheme.AllKnownTypes() -// for gvk := range kinds { -// if roundtrip.GlobalNonRoundTrippableTypes().Has(gvk.Kind) { -// continue -// } -// t.Run(gvk.Group+"."+gvk.Version+"."+gvk.Kind, func(t *testing.T) { -// // roundtrip test -// }) -// } -// +// kinds := scheme.AllKnownTypes() +// for gvk := range kinds { +// if roundtrip.GlobalNonRoundTrippableTypes().Has(gvk.Kind) { +// continue +// } +// t.Run(gvk.Group+"."+gvk.Version+"."+gvk.Kind, func(t *testing.T) { +// // roundtrip test +// }) +// } func GlobalNonRoundTrippableTypes() sets.String { return sets.NewString(globalNonRoundTrippableTypes.List()...) } @@ -294,11 +293,11 @@ func roundTripOfExternalType(t *testing.T, scheme *runtime.Scheme, codecFactory // // For internal types this means // -// internal -> external -> json/protobuf -> external -> internal. +// internal -> external -> json/protobuf -> external -> internal. // // For external types this means // -// external -> json/protobuf -> external. +// external -> json/protobuf -> external. 
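The external -> json/protobuf -> external leg described above can be exercised with any registered type. A minimal sketch using the client-go scheme's legacy codec; this pairing is only illustrative, since the roundtrip helper wires its own scheme and codec factories:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	codec := clientgoscheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)
	original := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}

	// external -> json
	data, err := runtime.Encode(codec, original)
	if err != nil {
		panic(err)
	}
	// json -> external
	decoded, err := runtime.Decode(codec, data)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.(*corev1.Pod).Name == original.Name) // true
}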
func roundTrip(t *testing.T, scheme *runtime.Scheme, codec runtime.Codec, object runtime.Object) { printer := spew.ConfigState{DisableMethods: true} original := object diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go index 00874f89cc6..60c8209de02 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -24,9 +24,9 @@ import ( // SetStatusCondition sets the corresponding condition in conditions to newCondition. // conditions must be non-nil. -// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status) -// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) +// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to +// newCondition, LastTransitionTime is set to now if the new status differs from the old status) +// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { if conditions == nil { return diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go index dcee8f5e948..899d3e8a667 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,7 +40,8 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// creating and enforcing that lists implement a list accessor. +// +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go index 0e8837e0930..44d877ecf56 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go @@ -27,6 +27,7 @@ import ( // 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy // kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, // all other groups alphabetical. +// // TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests // TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked. 
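The SetStatusCondition contract reformatted above (update in place when the type exists, otherwise append with LastTransitionTime defaulted) is easiest to see with a short sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// No "Available" condition exists yet, so this appends one and
	// defaults LastTransitionTime to now.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:   "Available",
		Status: metav1.ConditionTrue,
		Reason: "MinimumReplicasAvailable",
	})

	// Same type, different status: the existing entry is updated and
	// LastTransitionTime moves because the status changed.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:   "Available",
		Status: metav1.ConditionFalse,
		Reason: "ReplicaFailure",
	})

	fmt.Println(len(conditions)) // 1
}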
func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto index aa9d7d95e76..ddd0db8fbd1 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -32,7 +32,9 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource"; // // ``` // ::= -// (Note that may be empty, from the "" case in .) +// +// (Note that may be empty, from the "" case in .) +// // ::= 0 | 1 | ... | 9 // ::= | // ::= | . | . | . @@ -40,9 +42,13 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource"; // ::= | // ::= | | // ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// // ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// // ::= "e" | "E" // ``` // diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go index 158b6a6429d..f1068450f2d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -36,7 +36,9 @@ import ( // // ``` // ::= -// (Note that may be empty, from the "" case in .) +// +// (Note that may be empty, from the "" case in .) +// // ::= 0 | 1 | ... | 9 // ::= | // ::= | . | . | . @@ -44,9 +46,13 @@ import ( // ::= | // ::= | | // ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// // ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// // ::= "e" | "E" // ``` // @@ -409,10 +415,10 @@ func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "numb // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // // Note about BinarySI: -// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between -// -1 and +1, it will be emitted as if q.Format were DecimalSI. -// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be -// rounded up. (1.1i becomes 2i.) +// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) 
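The quantity grammar above covers both decimal and binary suffixes. A small sketch of how the documented forms parse and canonicalize, using resource.MustParse as the entry point:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("1500m") // DecimalSI: milli-units
	mem := resource.MustParse("1Mi")   // BinarySI: 1024 * 1024 bytes

	fmt.Println(cpu.MilliValue()) // 1500
	fmt.Println(mem.Value())      // 1048576
	fmt.Println(cpu.String())     // "1500m" (canonical form keeps the suffix family)
}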
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { if q.IsZero() { return zeroBytes, nil diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index b1d314fb958..2be188a6a8e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -162,17 +162,18 @@ message ApplyOptions { // Condition contains details for one aspect of the current state of this API Resource. // --- // This struct is intended for direct use as an array at the field path .status.conditions. For example, -// type FooStatus struct{ -// // Represents the observations of a foo's current state. -// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" -// // +patchMergeKey=type -// // +patchStrategy=merge -// // +listType=map -// // +listMapKey=type -// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // -// // other fields -// } +// type FooStatus struct{ +// // Represents the observations of a foo's current state. +// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +// // +patchMergeKey=type +// // +patchStrategy=merge +// // +listType=map +// // +listMapKey=type +// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +// +// // other fields +// } message Condition { // type of condition in CamelCase or in foo.example.com/CamelCase. // --- diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 5b1ba1a883b..152f99296ca 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -17,10 +17,11 @@ limitations under the License. // Package v1 contains API types that are common to all versions. // // The package contains two categories of types: -// - external (serialized) types that lack their own version (e.g TypeMeta) -// - internal (never-serialized) types that are needed by several different -// api groups, and so live here, to avoid duplication and/or import loops -// (e.g. LabelSelector). +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// // In the future, we will probably move these categories of objects into // separate packages. package v1 @@ -1448,17 +1449,18 @@ type PartialObjectMetadataList struct { // Condition contains details for one aspect of the current state of this API Resource. // --- // This struct is intended for direct use as an array at the field path .status.conditions. For example, -// type FooStatus struct{ -// // Represents the observations of a foo's current state. 
-// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" -// // +patchMergeKey=type -// // +patchStrategy=merge -// // +listType=map -// // +listMapKey=type -// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // -// // other fields -// } +// type FooStatus struct{ +// // Represents the observations of a foo's current state. +// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +// // +patchMergeKey=type +// // +patchStrategy=merge +// // +listType=map +// // +listMapKey=type +// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +// +// // other fields +// } type Condition struct { // type of condition in CamelCase or in foo.example.com/CamelCase. // --- diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go index 2434429b9f8..6d6f562ad13 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go +++ b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go @@ -149,7 +149,8 @@ type Requirement struct { // (4) If the operator is Exists or DoesNotExist, the value set must be empty. // (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. // (6) The key is invalid due to its length, or sequence -// of characters. See validateLabelKey for more details. +// +// of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -208,13 +209,20 @@ func (r *Requirement) hasValue(value string) bool { // There is a match in the following cases: // (1) The operator is Exists and Labels has the Requirement's key. // (2) The operator is In, Labels has the Requirement's key and Labels' -// value for that key is in Requirement's value set. +// +// value for that key is in Requirement's value set. +// // (3) The operator is NotIn, Labels has the Requirement's key and -// Labels' value for that key is not in Requirement's value set. +// +// Labels' value for that key is not in Requirement's value set. +// // (4) The operator is DoesNotExist or NotIn and Labels does not have the -// Requirement's key. +// +// Requirement's key. +// // (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -840,32 +848,33 @@ func (p *Parser) parseExactValue() (sets.String, error) { // as they parse different selectors with different syntaxes. // The input will cause an error if it does not follow this form: // -// ::= | "," -// ::= [!] KEY [ | ] -// ::= "" | -// ::= | -// ::= "notin" -// ::= "in" -// ::= "(" ")" -// ::= VALUE | VALUE "," -// ::= ["="|"=="|"!="] VALUE +// ::= | "," +// ::= [!] KEY [ | ] +// ::= "" | +// ::= | +// ::= "notin" +// ::= "in" +// ::= "(" ")" +// ::= VALUE | VALUE "," +// ::= ["="|"=="|"!="] VALUE // // KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. 
// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. // Delimiter is white space: (' ', '\t') // Example of valid syntax: -// "x in (foo,,baz),y,z notin ()" +// +// "x in (foo,,baz),y,z notin ()" // // Note: -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. // +// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// (2) Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// (3) The empty string is a valid VALUE +// (4) A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// (5) A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/allocator.go b/staging/src/k8s.io/apimachinery/pkg/runtime/allocator.go index 0d00d8c3a3b..cd6585205c1 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/allocator.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/allocator.go @@ -24,12 +24,14 @@ import ( // by caching created but unused items for later reuse, relieving pressure on the garbage collector. // // Usage: -// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator) -// defer runtime.AllocatorPool.Put(memoryAllocator) +// +// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator) +// defer runtime.AllocatorPool.Put(memoryAllocator) // // A note for future: -// consider introducing multiple pools for storing buffers of different sizes -// perhaps this could allow us to be more efficient. +// +// consider introducing multiple pools for storing buffers of different sizes +// perhaps this could allow us to be more efficient. var AllocatorPool = sync.Pool{ New: func() interface{} { return &Allocator{} diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go b/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go index a92863139ed..7fc513dd0e7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go @@ -344,14 +344,15 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi // Incoming kinds that match the provided groupKinds are preferred. // Kind may be empty in the provided group kind, in which case any kind will match. 
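Since this hunk only re-indents the selector grammar, a short usage sketch may help connect it to the exported API; it reuses the comment's own example string, and labels.Parse / Selector.Matches are the entry points defined in this file.

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/labels"
	)

	func main() {
		// The example from the comment: set-based, exists, and empty-set requirements.
		sel, err := labels.Parse("x in (foo,,baz),y,z notin ()")
		if err != nil {
			panic(err)
		}

		// x must be one of foo/""/baz, y must merely exist, and z must either
		// be absent or not equal any value in the (empty) set.
		fmt.Println(sel.Matches(labels.Set{"x": "foo", "y": "anything"})) // true
		fmt.Println(sel.Matches(labels.Set{"x": "qux", "y": "anything"})) // false
	}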
// Examples: -// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar -// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) // -// gv=mygroup/__internal, groupKinds=mygroup, anothergroup -// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) // -// gv=mygroup/__internal, groupKinds=mygroup, anothergroup -// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto index de634e2c64e..5f06cc5743d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -31,32 +31,37 @@ option go_package = "k8s.io/apimachinery/pkg/runtime"; // various plugin types. // // // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } +// +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } // // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. @@ -78,10 +83,12 @@ message RawExtension { // TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, // like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } +// +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... 
// other fields +// } +// // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind // // TypeMeta is provided here for convenience. You may use it directly from this package or define diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 994a3e3fa81..b21eb664e3f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,7 +191,8 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. +// +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -239,7 +240,8 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// in fewer places. +// +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go index ff3a3fe0a8f..18b25a994b8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,7 +118,8 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// every version with particular schemas. Resolve this method at that point. +// +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 9de35e791c0..21944f2d8fd 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,7 +259,8 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// All other callers will be forced to request a Codec directly. +// +// All other callers will be forced to request a Codec directly. 
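To make the KindForGroupVersionKinds preference order above concrete, a minimal sketch against the schema package; the apps/v1 Deployment values are arbitrary examples chosen for illustration.

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/runtime/schema"
	)

	func main() {
		gv := schema.GroupVersion{Group: "apps", Version: "v1"}

		// An exact group+version match wins over a group-only match.
		kinds := []schema.GroupVersionKind{
			{Group: "apps", Version: "v1beta1", Kind: "Deployment"},
			{Group: "apps", Version: "v1", Kind: "Deployment"},
		}
		if target, ok := gv.KindForGroupVersionKinds(kinds); ok {
			fmt.Println(target) // apps/v1, Kind=Deployment
		}

		// WithResource is the usual way to fan a GroupVersion out into a GVR.
		fmt.Println(gv.WithResource("deployments")) // apps/v1, Resource=deployments
	}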
func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/types.go b/staging/src/k8s.io/apimachinery/pkg/runtime/types.go index 31359f35f45..3dc9a5a2f2a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/types.go @@ -21,10 +21,12 @@ package runtime // TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, // like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } +// +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind // // TypeMeta is provided here for convenience. You may use it directly from this package or define @@ -53,32 +55,37 @@ const ( // various plugin types. // // // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } +// +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// +// type PluginA struct { +// AOption string `json:"aOption"` +// } // // // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } +// +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } // // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. diff --git a/staging/src/k8s.io/apimachinery/pkg/types/nodename.go b/staging/src/k8s.io/apimachinery/pkg/types/nodename.go index fee348d7e76..cff9ca67176 100644 --- a/staging/src/k8s.io/apimachinery/pkg/types/nodename.go +++ b/staging/src/k8s.io/apimachinery/pkg/types/nodename.go @@ -23,21 +23,21 @@ package types // // To clarify the various types: // -// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. -// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// - Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. // -// * Hostname is the hostname of the local machine (from uname -n). -// However, some components allow the user to pass in a --hostname-override flag, -// which will override this in most places. 
In the absence of anything more meaningful, -// kubelet will use Hostname as the Node.Name when it creates the Node. +// - Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. // // * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. // -// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the -// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up -// to the cloudprovider how to do this mapping. +// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. // -// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the -// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if -// we are using a custom DHCP domain it won't be. +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. type NodeName string diff --git a/staging/src/k8s.io/apimachinery/pkg/util/framer/framer.go b/staging/src/k8s.io/apimachinery/pkg/util/framer/framer.go index 10df0d99cd5..ca08f8561de 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -56,10 +56,10 @@ type lengthDelimitedFrameReader struct { // // The protocol is: // -// stream: message ... -// message: prefix body -// prefix: 4 byte uint32 in BigEndian order, denotes length of body -// body: bytes (0..prefix) +// stream: message ... +// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) // // If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead // will be returned along with the number of bytes read. 
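The framing protocol documented above is simple enough to sketch end to end: a 4-byte big-endian length prefix followed by the body. This toy writer/reader only mirrors the wire format for illustration; it is not the package's unexported frame reader, and the JSON payload is a placeholder.

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"io"
	)

	// writeFrame emits one message: 4-byte big-endian length, then the body.
	func writeFrame(w io.Writer, body []byte) error {
		var prefix [4]byte
		binary.BigEndian.PutUint32(prefix[:], uint32(len(body)))
		if _, err := w.Write(prefix[:]); err != nil {
			return err
		}
		_, err := w.Write(body)
		return err
	}

	// readFrame reads a single length-prefixed message back.
	func readFrame(r io.Reader) ([]byte, error) {
		var prefix [4]byte
		if _, err := io.ReadFull(r, prefix[:]); err != nil {
			return nil, err
		}
		body := make([]byte, binary.BigEndian.Uint32(prefix[:]))
		_, err := io.ReadFull(r, body)
		return body, err
	}

	func main() {
		var buf bytes.Buffer
		_ = writeFrame(&buf, []byte(`{"type":"ADDED"}`))
		frame, _ := readFrame(&buf)
		fmt.Println(string(frame))
	}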
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go index ab660c57e96..10b329594fd 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go @@ -700,7 +700,8 @@ func TestRoundTripPassesContextToDialer(t *testing.T) { } // exampleCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var exampleCert = []byte(`-----BEGIN CERTIFICATE----- MIIDADCCAeigAwIBAgIQVHG3Fn9SdWayyLOZKCW1vzANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw @@ -750,7 +751,8 @@ LB4rdf46lV0mUkvd2/oofIbTrzukjQSnyfLawb/2uJGV1IkTcZcn9CI= -----END RSA PRIVATE KEY-----`) // localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIDGTCCAgGgAwIBAgIRALL5AZcefF4kkYV1SEG6YrMwDQYJKoZIhvcNAQELBQAw EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2 diff --git a/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go index 990fa0d43a6..e3962756822 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -88,7 +88,8 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/port_split.go b/staging/src/k8s.io/apimachinery/pkg/util/net/port_split.go index c0fd4e20fe5..f54bb1e71c9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/port_split.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -25,9 +25,9 @@ import ( var validSchemes = sets.NewString("http", "https", "") // SplitSchemeNamePort takes a string of the following forms: -// * "", returns "", "","", true -// * ":", returns "", "","",true -// * "::", returns "","","",true +// - "", returns "", "","", true +// - ":", returns "", "","",true +// - "::", returns "","","",true // // Name must be non-empty or valid will be returned false. 
// Scheme must be "http" or "https" if specified @@ -57,9 +57,10 @@ func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { } // JoinSchemeNamePort returns a string that specifies the scheme, name, and port: -// * "" -// * ":" -// * "::" +// - "" +// - ":" +// - "::" +// // None of the parameters may contain a ':' character // Name is required // Scheme must be "", "http", or "https" diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/util.go b/staging/src/k8s.io/apimachinery/pkg/util/net/util.go index 5950087e022..1c2aba55f7b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/util.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/util.go @@ -25,6 +25,7 @@ import ( // IPNetEqual checks if the two input IPNets are representing the same subnet. // For example, +// // 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. // 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go index a35d3084601..705aea8ff0c 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go @@ -177,7 +177,8 @@ func TestDialURL(t *testing.T) { } // localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIDGTCCAgGgAwIBAgIRAKfNl1LEAt7nFPYvHBnpv2swDQYJKoZIhvcNAQELBQAw EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2 diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go index b9200993703..489d9b04264 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -39,7 +39,8 @@ import ( // atomsToAttrs states which attributes of which tags require URL substitution. 
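A quick sketch of the three forms listed for SplitSchemeNamePort and its JoinSchemeNamePort inverse, assuming the k8s.io/apimachinery/pkg/util/net package is importable; the names and ports are placeholders.

	package main

	import (
		"fmt"

		netutil "k8s.io/apimachinery/pkg/util/net"
	)

	func main() {
		// name, name:port, scheme:name:port, and an invalid id with an empty name.
		for _, id := range []string{"svc", "svc:8080", "https:svc:8443", ":8080"} {
			scheme, name, port, valid := netutil.SplitSchemeNamePort(id)
			fmt.Printf("%q -> scheme=%q name=%q port=%q valid=%v\n", id, scheme, name, port, valid)
		}

		// Join is the inverse; empty scheme and port are simply omitted.
		fmt.Println(netutil.JoinSchemeNamePort("https", "svc", "8443")) // https:svc:8443
		fmt.Println(netutil.JoinSchemeNamePort("", "svc", ""))          // svc
	}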
// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html -// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 +// +// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 var atomsToAttrs = map[atom.Atom]sets.String{ atom.A: sets.NewString("href"), atom.Applet: sets.NewString("codebase"), diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go index f57b69a03ab..f7fcff7c080 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware_test.go @@ -1113,7 +1113,8 @@ func TestProxyRedirectsforRootPath(t *testing.T) { } // exampleCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 1024 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 1024 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var exampleCert = []byte(`-----BEGIN CERTIFICATE----- MIIDADCCAeigAwIBAgIQVHG3Fn9SdWayyLOZKCW1vzANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw diff --git a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go index f895f234c58..8ddcddc5b8d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go @@ -755,7 +755,6 @@ func TestCustomStrategicMergePatch(t *testing.T) { // yields the correct outcome. They are also test cases for CreateTwoWayMergePatch // and CreateThreeWayMergePatch, to assert that they both generate the correct patch // for the given set of input documents. -// var createStrategicMergePatchTestCaseData = []byte(` testCases: - description: nil original diff --git a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go index d1469d7972d..137627b4050 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -29,9 +29,11 @@ import ( ) // For any test of the style: -// ... -// <- time.After(timeout): -// t.Errorf("Timed out") +// +// ... +// <- time.After(timeout): +// t.Errorf("Timed out") +// // The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s // is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine // (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. @@ -615,7 +617,7 @@ type WaitWithContextFunc func(ctx context.Context) <-chan struct{} // WaitFor continually checks 'fn' as driven by 'wait'. // -// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value +// WaitFor gets a channel from 'wait()”, and then invokes 'fn' once for every value // placed on the channel and once more when the channel is closed. If the channel is closed // and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. // @@ -636,7 +638,7 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // WaitForWithContext continually checks 'fn' as driven by 'wait'. 
// -// WaitForWithContext gets a channel from 'wait()'', and then invokes 'fn' +// WaitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' // returns false without error, WaitForWithContext returns ErrWaitTimeout. diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/filter.go b/staging/src/k8s.io/apimachinery/pkg/watch/filter.go index 22c9449f59c..a5735a0b47a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/filter.go +++ b/staging/src/k8s.io/apimachinery/pkg/watch/filter.go @@ -32,7 +32,6 @@ type FilterFunc func(in Event) (out Event, keep bool) // WARNING: filter has a fatal flaw, in that it can't properly update the // Type field (Add/Modified/Deleted) to reflect items beginning to pass the // filter when they previously didn't. -// func Filter(w Interface, f FilterFunc) Interface { fw := &filteredWatch{ incoming: w, diff --git a/staging/src/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/staging/src/k8s.io/apiserver/pkg/admission/initializer/interfaces.go index 191c090556d..83d3d48c180 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/initializer/interfaces.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/initializer/interfaces.go @@ -60,9 +60,10 @@ type WantsDrainedNotification interface { // WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin. // Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one // and assign it to a simple bool in the admission plugin struct. -// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ -// a.myFeatureIsOn = features.Enabled("my-feature") -// } +// +// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ +// a.myFeatureIsOn = features.Enabled("my-feature") +// } type WantsFeatures interface { InspectFeatureGates(featuregate.FeatureGate) admission.InitializationValidator diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go index 4d1576db4e1..02c5a740cc8 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/controller.go @@ -215,16 +215,16 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis // checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it // AFTER it has checked all the admissionAttributes. The method breaks down into phase like this: -// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the -// originals -// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed -// and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change -// mark the waiter as succeeded. If some quota did change, update the running quotas -// 2. If no running quota was changed, return now since no updates are needed. -// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. 
If the some -// updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version -// and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota -// documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded. +// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the +// originals +// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed +// and the running quota don't change. If it did fit, check to see if any quota was changed. It there was no quota change +// mark the waiter as succeeded. If some quota did change, update the running quotas +// 2. If no running quota was changed, return now since no updates are needed. +// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If the some +// updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version +// and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota +// documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded. func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) { // yet another copy to compare against originals to see if we actually have deltas originalQuotas, err := copyQuotas(quotas) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go index 67881798743..60cfe5536ba 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go @@ -18,15 +18,17 @@ limitations under the License. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto /* - Package v1 is a generated protocol buffer package. +Package v1 is a generated protocol buffer package. - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto +It is generated from these files: - It has these top-level messages: - ReplicaSet - ReplicaSetSpec - ReplicaSetStatus + k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto + +It has these top-level messages: + + ReplicaSet + ReplicaSetSpec + ReplicaSetStatus */ package v1 diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go b/staging/src/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go index 3d0123b23f7..7f8986faeac 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go @@ -43,8 +43,9 @@ var leadingDigits = regexp.MustCompile(`^(\d+)`) // MajorMinor parses a numeric major/minor version from the provided version info. 
// The minor version drops all characters after the first non-digit character: -// version.Info{Major:"1", Minor:"2+"} -> 1,2 -// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2 +// +// version.Info{Major:"1", Minor:"2+"} -> 1,2 +// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2 func MajorMinor(v version.Info) (int, int, error) { major, err := strconv.Atoi(v.Major) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go index d81383628c7..d9844990c29 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go @@ -56,9 +56,9 @@ func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion sch // ToSubresource filter the managed fields of the main resource and convert // them so that they can be handled by scale. // For the managed fields that have a replicas path it performs two changes: -// 1. APIVersion is changed to the APIVersion of the scale subresource -// 2. Replicas path of the main resource is transformed to the replicas path of -// the scale subresource +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. Replicas path of the main resource is transformed to the replicas path of +// the scale subresource func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { managed, err := DecodeManagedFields(h.parentEntries) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go index d606c63c884..83be6fcbd57 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/context_test.go @@ -41,7 +41,7 @@ func TestNamespaceContext(t *testing.T) { } } -//TestUserContext validates that a userinfo can be get/set on a context object +// TestUserContext validates that a userinfo can be get/set on a context object func TestUserContext(t *testing.T) { ctx := NewContext() _, ok := UserFrom(ctx) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go b/staging/src/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go index 758e7addd28..893dfa2812b 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go @@ -42,19 +42,19 @@ type UserProvidedDecorator interface { // object implements the same subset of those interfaces as the inner http.ResponseWriter. // // This function handles the following three casses. -// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`, -// and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter). -// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher` -// but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter). -// - All the other cases collapse to this one, in which the given ResponseWriter is returned. +// - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`, +// and `http.Hijacker` (an HTTP/1.1 sever provides such a ResponseWriter). +// - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher` +// but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter). 
+// - All the other cases collapse to this one, in which the given ResponseWriter is returned. // // There are three applicable terms: -// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function. -// - "user-provided decorator" or "middle": this is the user-provided decorator +// - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function. +// - "user-provided decorator" or "middle": this is the user-provided decorator // that decorates an inner ResponseWriter object. A user-provided decorator // implements the UserProvidedDecorator interface. A user-provided decorator // may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker. -// - "inner": the ResponseWriter that the user-provided decorator extends. +// - "inner": the ResponseWriter that the user-provided decorator extends. func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter { // from go net/http documentation: // The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 67015bbe35c..03ec793f81f 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -924,13 +924,13 @@ func markAsDeleting(obj runtime.Object, now time.Time) (err error) { // grace period seconds (graceful deletion) and updating the list of // finalizers (finalization); it returns: // -// 1. an error -// 2. a boolean indicating that the object was not found, but it should be -// ignored -// 3. a boolean indicating that the object's grace period is exhausted and it -// should be deleted immediately -// 4. a new output object with the state that was updated -// 5. a copy of the last existing state of the object +// 1. an error +// 2. a boolean indicating that the object was not found, but it should be +// ignored +// 3. a boolean indicating that the object's grace period is exhausted and it +// should be deleted immediately +// 4. a new output object with the state that was updated +// 5. a copy of the last existing state of the object func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) { lastGraceful := int64(0) var pendingFinalizers bool diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go index b9d53c3aad5..f4f3519b521 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go @@ -821,8 +821,8 @@ func (t *Tester) testDeleteNonExist(obj runtime.Object, opts metav1.DeleteOption } -// This test the fast-fail path. We test that the precondition gets verified -// again before deleting the object in tests of pkg/storage/etcd. +// This test the fast-fail path. We test that the precondition gets verified +// again before deleting the object in tests of pkg/storage/etcd. 
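Since the outer/middle/inner terminology above is easy to lose track of, here is a hedged sketch of a user-provided decorator being wrapped, assuming the k8s.io/apiserver module is available; countingWriter is a made-up decorator used purely for illustration, not something this package provides.

	package main

	import (
		"fmt"
		"net/http"
		"net/http/httptest"

		"k8s.io/apiserver/pkg/endpoints/responsewriter"
	)

	// countingWriter is the "middle" decorator: it wraps an inner
	// ResponseWriter and exposes it via Unwrap, as UserProvidedDecorator requires.
	type countingWriter struct {
		http.ResponseWriter
		n int
	}

	func (c *countingWriter) Write(p []byte) (int, error) {
		n, err := c.ResponseWriter.Write(p)
		c.n += n
		return n, err
	}

	func (c *countingWriter) Unwrap() http.ResponseWriter { return c.ResponseWriter }

	func main() {
		inner := httptest.NewRecorder()
		mid := &countingWriter{ResponseWriter: inner}

		// The "outer" writer exposes the same optional interfaces as the inner one;
		// writes still flow through the decorator either way.
		outer := responsewriter.WrapForHTTP1Or2(mid)
		fmt.Fprint(outer, "hello")

		fmt.Println(mid.n, inner.Body.String()) // 5 hello
	}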
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) { ctx := t.TestContext() @@ -858,8 +858,8 @@ func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getF } } -// This test the fast-fail path. We test that the precondition gets verified -// again before deleting the object in tests of pkg/storage/etcd. +// This test the fast-fail path. We test that the precondition gets verified +// again before deleting the object in tests of pkg/storage/etcd. func (t *Tester) testDeleteWithResourceVersion(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) { ctx := t.TestContext() diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight_test.go index ad6f0f0d541..3db322ad7ec 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight_test.go @@ -75,13 +75,13 @@ func withFakeUser(handler http.Handler) http.Handler { } // Tests that MaxInFlightLimit works, i.e. -// - "long" requests such as proxy or watch, identified by regexp are not accounted despite -// hanging for the long time, -// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the -// constructor in flight at any given moment, -// - subsequent "short" requests are rejected instantly with appropriate error, -// - subsequent "long" requests are handled normally, -// - we correctly recover after some "short" requests finish, i.e. we can process new ones. +// - "long" requests such as proxy or watch, identified by regexp are not accounted despite +// hanging for the long time, +// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the +// constructor in flight at any given moment, +// - subsequent "short" requests are rejected instantly with appropriate error, +// - subsequent "long" requests are handled normally, +// - we correctly recover after some "short" requests finish, i.e. we can process new ones. func TestMaxInFlightNonMutating(t *testing.T) { const AllowedNonMutatingInflightRequestsNo = 3 diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 7c62b621ccc..dbcbcbd8954 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -418,41 +418,44 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { // or the secure port cannot be listened on initially. 
// This is the diagram of what channels/signals are dependent on each other: // -// stopCh -// | -// --------------------------------------------------------- -// | | -// ShutdownInitiated (shutdownInitiatedCh) | -// | | -// (ShutdownDelayDuration) (PreShutdownHooks) -// | | -// AfterShutdownDelayDuration (delayedStopCh) PreShutdownHooksStopped (preShutdownHooksHasStoppedCh) -// | | -// |-------------------------------------------------------| -// | -// | -// NotAcceptingNewRequest (notAcceptingNewRequestCh) -// | -// | -// |---------------------------------------------------------| -// | | | | -// [without [with | | -// ShutdownSendRetryAfter] ShutdownSendRetryAfter] | | -// | | | | -// | ---------------| | -// | | | -// | (HandlerChainWaitGroup::Wait) | -// | | | -// | InFlightRequestsDrained (drainedCh) | -// | | | -// ----------------------------------------|-----------------| -// | | -// stopHttpServerCh (AuditBackend::Shutdown()) -// | -// listenerStoppedCh -// | -// HTTPServerStoppedListening (httpServerStoppedListeningCh) +// stopCh +// | +// --------------------------------------------------------- +// | | +// ShutdownInitiated (shutdownInitiatedCh) | +// | | // +// (ShutdownDelayDuration) (PreShutdownHooks) +// +// | | +// AfterShutdownDelayDuration (delayedStopCh) PreShutdownHooksStopped (preShutdownHooksHasStoppedCh) +// | | +// |-------------------------------------------------------| +// | +// | +// NotAcceptingNewRequest (notAcceptingNewRequestCh) +// | +// | +// |---------------------------------------------------------| +// | | | | +// [without [with | | +// +// ShutdownSendRetryAfter] ShutdownSendRetryAfter] | | +// +// | | | | +// | ---------------| | +// | | | +// | (HandlerChainWaitGroup::Wait) | +// | | | +// | InFlightRequestsDrained (drainedCh) | +// | | | +// ----------------------------------------|-----------------| +// | | +// stopHttpServerCh (AuditBackend::Shutdown()) +// | +// listenerStoppedCh +// | +// HTTPServerStoppedListening (httpServerStoppedListeningCh) func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { delayedStopCh := s.lifecycleSignals.AfterShutdownDelayDuration shutdownInitiatedCh := s.lifecycleSignals.ShutdownInitiated diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_graceful_termination_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_graceful_termination_test.go index 6858796492c..808a3464dfb 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_graceful_termination_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_graceful_termination_test.go @@ -120,57 +120,56 @@ func newSignalInterceptingTestStep() *signalInterceptingTestStep { } } -// This test exercises the graceful termination scenario -// described in the following diagram -// - every vertical line is an independent timeline -// - the leftmost vertical line represents the go routine that -// is executing GenericAPIServer.Run methos -// - (signal name) indicates that the given lifecycle signal has been fired -// -// stopCh -// | -// |--------------------------------------------| -// | | -// call PreShutdownHooks (ShutdownInitiated) -// | | -// (PreShutdownHooksStopped) Sleep(ShutdownDelayDuration) -// | | -// | (AfterShutdownDelayDuration) -// | | -// | | -// |--------------------------------------------| -// | | -// | (NotAcceptingNewRequest) -// | | -// | |-------------------------------------------------| -// | | | -// | close(stopHttpServerCh) HandlerChainWaitGroup.Wait() -// | | | -// | 
server.Shutdown(timeout=60s) | -// | | | -// | stop listener (net/http) | -// | | | -// | |-------------------------------------| | -// | | | | -// | | (HTTPServerStoppedListening) | -// | | | -// | wait up to 60s | -// | | (InFlightRequestsDrained) -// | | -// | | -// | stoppedCh is closed -// | -// | -// <-drainedCh.Signaled() -// | -// s.AuditBackend.Shutdown() -// | -// <-listenerStoppedCh -// | -// <-stoppedCh -// | -// return nil +// This test exercises the graceful termination scenario +// described in the following diagram +// - every vertical line is an independent timeline +// - the leftmost vertical line represents the go routine that +// is executing GenericAPIServer.Run methos +// - (signal name) indicates that the given lifecycle signal has been fired // +// stopCh +// | +// |--------------------------------------------| +// | | +// call PreShutdownHooks (ShutdownInitiated) +// | | +// (PreShutdownHooksStopped) Sleep(ShutdownDelayDuration) +// | | +// | (AfterShutdownDelayDuration) +// | | +// | | +// |--------------------------------------------| +// | | +// | (NotAcceptingNewRequest) +// | | +// | |-------------------------------------------------| +// | | | +// | close(stopHttpServerCh) HandlerChainWaitGroup.Wait() +// | | | +// | server.Shutdown(timeout=60s) | +// | | | +// | stop listener (net/http) | +// | | | +// | |-------------------------------------| | +// | | | | +// | | (HTTPServerStoppedListening) | +// | | | +// | wait up to 60s | +// | | (InFlightRequestsDrained) +// | | +// | | +// | stoppedCh is closed +// | +// | +// <-drainedCh.Signaled() +// | +// s.AuditBackend.Shutdown() +// | +// <-listenerStoppedCh +// | +// <-stoppedCh +// | +// return nil func TestGracefulTerminationWithKeepListeningDuringGracefulTerminationDisabled(t *testing.T) { fakeAudit := &fakeAudit{} s := newGenericAPIServer(t, fakeAudit, false) @@ -333,50 +332,52 @@ func TestGracefulTerminationWithKeepListeningDuringGracefulTerminationDisabled(t } } -// This test exercises the graceful termination scenario -// described in the following diagram -// - every vertical line is an independent timeline -// - the leftmost vertical line represents the go routine that -// is executing GenericAPIServer.Run method -// - (signal) indicates that the given lifecycle signal has been fired +// This test exercises the graceful termination scenario +// described in the following diagram // -// stopCh -// | -// |--------------------------------------------| -// | | -// call PreShutdownHooks (ShutdownInitiated) -// | | -// (PreShutdownHooksCompleted) Sleep(ShutdownDelayDuration) -// | | -// | (AfterShutdownDelayDuration) -// | | -// | | -// |--------------------------------------------| -// | | -// | (NotAcceptingNewRequest) -// | | -// | HandlerChainWaitGroup.Wait() -// | | -// | (InFlightRequestsDrained) -// | | -// | | -// |------------------------------------------------------------| -// | | -// <-drainedCh.Signaled() close(stopHttpServerCh) -// | | +// - every vertical line is an independent timeline +// +// - the leftmost vertical line represents the go routine that +// is executing GenericAPIServer.Run method +// +// - (signal) indicates that the given lifecycle signal has been fired +// +// stopCh +// | +// |--------------------------------------------| +// | | +// call PreShutdownHooks (ShutdownInitiated) +// | | +// (PreShutdownHooksCompleted) Sleep(ShutdownDelayDuration) +// | | +// | (AfterShutdownDelayDuration) +// | | +// | | +// |--------------------------------------------| +// | | +// | 
(NotAcceptingNewRequest) +// | | +// | HandlerChainWaitGroup.Wait() +// | | +// | (InFlightRequestsDrained) +// | | +// | | +// |------------------------------------------------------------| +// | | +// <-drainedCh.Signaled() close(stopHttpServerCh) +// | | // s.AuditBackend.Shutdown() server.Shutdown(timeout=2s) -// | | -// | stop listener (net/http) -// | | -// | |-------------------------------------| -// | | | -// | wait up to 2s (HTTPServerStoppedListening) +// | | +// | stop listener (net/http) +// | | +// | |-------------------------------------| +// | | | +// | wait up to 2s (HTTPServerStoppedListening) // <-listenerStoppedCh | -// | stoppedCh is closed -// <-stoppedCh -// | -// return nil -// +// | stoppedCh is closed +// <-stoppedCh +// | +// return nil func TestGracefulTerminationWithKeepListeningDuringGracefulTerminationEnabled(t *testing.T) { fakeAudit := &fakeAudit{} s := newGenericAPIServer(t, fakeAudit, true) diff --git a/staging/src/k8s.io/apiserver/pkg/server/handler.go b/staging/src/k8s.io/apiserver/pkg/server/handler.go index be52f81e027..9f37df1cdff 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/handler.go +++ b/staging/src/k8s.io/apiserver/pkg/server/handler.go @@ -154,7 +154,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { d.nonGoRestfulMux.ServeHTTP(w, req) } -//TODO: Unify with RecoverPanics? +// TODO: Unify with RecoverPanics? func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, w http.ResponseWriter) { var buffer bytes.Buffer buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) diff --git a/staging/src/k8s.io/apiserver/pkg/server/healthz/doc.go b/staging/src/k8s.io/apiserver/pkg/server/healthz/doc.go index d938caa3713..ad3f0018733 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/healthz/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/server/healthz/doc.go @@ -16,6 +16,7 @@ limitations under the License. // Package healthz implements basic http server health checking. // Usage: -// import "k8s.io/apiserver/pkg/server/healthz" -// healthz.InstallHandler(mux) +// +// import "k8s.io/apiserver/pkg/server/healthz" +// healthz.InstallHandler(mux) package healthz // import "k8s.io/apiserver/pkg/server/healthz" diff --git a/staging/src/k8s.io/apiserver/pkg/server/hooks.go b/staging/src/k8s.io/apiserver/pkg/server/hooks.go index 999ad36000c..b33ddd04618 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/hooks.go +++ b/staging/src/k8s.io/apiserver/pkg/server/hooks.go @@ -35,6 +35,7 @@ import ( // 2. conflicts between the different processes all trying to perform the same action // 3. partially complete work (API server crashes while running your hook) // 4. API server access **BEFORE** your hook has completed +// // Think of it like a mini-controller that is super privileged and gets to run in-process // If you use this feature, tag @deads2k on github who has promised to review code for anyone's PostStartHook // until it becomes easier to use. diff --git a/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go index d39efa5be21..ce4c1b4a6ed 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go +++ b/staging/src/k8s.io/apiserver/pkg/server/lifecycle_signals.go @@ -107,8 +107,8 @@ type lifecycleSignal interface { // for us to write unit tests that can verify expected graceful termination behavior. 
// // GenericAPIServer can use these to either: -// - signal that a particular termination event has transpired -// - wait for a designated termination event to transpire and do some action. +// - signal that a particular termination event has transpired +// - wait for a designated termination event to transpire and do some action. type lifecycleSignals struct { // ShutdownInitiated event is signaled when an apiserver shutdown has been initiated. // It is signaled when the `stopCh` provided by the main goroutine diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go index 2d77e68979c..c265677bbbf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/admission.go @@ -70,12 +70,13 @@ type AdmissionOptions struct { // NewAdmissionOptions creates a new instance of AdmissionOptions // Note: -// In addition it calls RegisterAllAdmissionPlugins to register -// all generic admission plugins. // -// Provides the list of RecommendedPluginOrder that holds sane values -// that can be used by servers that don't care about admission chain. -// Servers that do care can overwrite/append that field after creation. +// In addition it calls RegisterAllAdmissionPlugins to register +// all generic admission plugins. +// +// Provides the list of RecommendedPluginOrder that holds sane values +// that can be used by servers that don't care about admission chain. +// Servers that do care can overwrite/append that field after creation. func NewAdmissionOptions() *AdmissionOptions { options := &AdmissionOptions{ Plugins: admission.NewPlugins(), @@ -115,7 +116,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { // In case admission plugin names were not provided by a cluster-admin they will be prepared from the recommended/default values. // In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers // note this method uses: -// genericconfig.Authorizer +// +// genericconfig.Authorizer func (a *AdmissionOptions) ApplyTo( c *server.Config, informers informers.SharedInformerFactory, @@ -220,7 +222,7 @@ func (a *AdmissionOptions) enabledPluginNames() []string { return orderedPlugins } -//Return names of plugins which are enabled by default +// Return names of plugins which are enabled by default func (a *AdmissionOptions) defaultEnabledPluginNames() []string { defaultOnPluginNames := []string{} for _, pluginName := range a.RecommendedPluginOrder { diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go index 6a3f5a4bb55..c31ce47f8ca 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go @@ -38,8 +38,9 @@ import ( // DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to // the root kube API server. // WARNING: never assume that every authenticated incoming request already does authorization. -// The aggregator in the kube API server does this today, but this behaviour is not -// guaranteed in the future. +// +// The aggregator in the kube API server does this today, but this behaviour is not +// guaranteed in the future. 
type DelegatingAuthorizationOptions struct { // RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the // SubjectAccessReview.authorization.k8s.io endpoint for checking tokens. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 040e7c599ce..954a787ac42 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -585,7 +585,8 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o } // NOTICE: Keep in sync with shouldListFromStorage function in -// staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +// +// staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go func shouldDelegateList(opts storage.ListOptions) bool { resourceVersion := opts.ResourceVersion pred := opts.Predicate diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/time_budget.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/time_budget.go index da77bd42b02..636c6ef8d6a 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/time_budget.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/time_budget.go @@ -30,13 +30,14 @@ const ( // timeBudget implements a budget of time that you can use and is // periodically being refreshed. The pattern to use it is: -// budget := newTimeBudget(...) -// ... -// timeout := budget.takeAvailable() -// // Now you can spend at most timeout on doing stuff -// ... -// // If you didn't use all timeout, return what you didn't use -// budget.returnUnused() +// +// budget := newTimeBudget(...) +// ... +// timeout := budget.takeAvailable() +// // Now you can spend at most timeout on doing stuff +// ... +// // If you didn't use all timeout, return what you didn't use +// budget.returnUnused() // // NOTE: It's not recommended to be used concurrently from multiple threads - // if first user takes the whole timeout, the second one will get 0 timeout diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index 833d10e153a..c455357e04d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -53,7 +53,9 @@ import ( // an interval as invalid iff we need to copy events from the // watchCache and we end up needing events that have already // been popped off. This translates to the following condition: -// watchCacheInterval::startIndex >= watchCache::startIndex. +// +// watchCacheInterval::startIndex >= watchCache::startIndex. +// // When this condition becomes false, the interval is no longer // valid and should not be used to retrieve and serve elements // from the underlying source. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go index a387b79cc59..96d592e7907 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/latency_tracker.go @@ -47,7 +47,8 @@ func NewETCDLatencyTracker(delegate clientv3.KV) clientv3.KV { // tracking function TrackStorageLatency is thread safe. // // NOTE: Compact is an asynchronous process and is not associated with -// any request, so we will not be tracking its latency. 
+// +// any request, so we will not be tracking its latency. type clientV3KVLatencyTracker struct { clientv3.KV } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/testingcert/certificates.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/testingcert/certificates.go index 3eecfda609c..8af0a351950 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/testingcert/certificates.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/testingcert/certificates.go @@ -20,13 +20,18 @@ package testingcert // https://github.com/coreos/etcd/tree/master/hack/tls-setup for more details. // // ca-config.json: -// expiry was changed from 1 year to 100 years (876000h) +// +// expiry was changed from 1 year to 100 years (876000h) +// // ca-csr.json: -// ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"}) -// key was changed from ecdsa,384 to rsa,2048 +// +// ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"}) +// key was changed from ecdsa,384 to rsa,2048 +// // req-csr.json: -// key was changed from ecdsa,384 to rsa,2048 -// hosts were changed to "localhost","127.0.0.1" +// +// key was changed from ecdsa,384 to rsa,2048 +// hosts were changed to "localhost","127.0.0.1" const CAFileContent = ` -----BEGIN CERTIFICATE----- MIIEUDCCAzigAwIBAgIUKfV5+qwlw3JneAPdJS7JCO8xIlYwDQYJKoZIhvcNAQEL diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testserver/test_server.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testserver/test_server.go index 41a411bdf41..87665515d3e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testserver/test_server.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testserver/test_server.go @@ -49,11 +49,11 @@ func getAvailablePorts(count int) ([]int, error) { // NewTestConfig returns a configuration for an embedded etcd server. // The configuration is based on embed.NewConfig(), with the following adjustments: -// * sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only -// single-member server we never intend to restart or keep data from) -// * uses free ports for client and peer listeners -// * cleans up the data directory on test termination -// * silences server logs other than errors +// - sets UnsafeNoFsync = true to improve test performance (only reasonable in a test-only +// single-member server we never intend to restart or keep data from) +// - uses free ports for client and peer listeners +// - cleans up the data directory on test termination +// - silences server logs other than errors func NewTestConfig(t *testing.T) *embed.Config { cfg := embed.NewConfig() diff --git a/staging/src/k8s.io/apiserver/pkg/storageversion/updater.go b/staging/src/k8s.io/apiserver/pkg/storageversion/updater.go index ddd8dfbe632..ce4d87e91c4 100644 --- a/staging/src/k8s.io/apiserver/pkg/storageversion/updater.go +++ b/staging/src/k8s.io/apiserver/pkg/storageversion/updater.go @@ -91,10 +91,11 @@ func findStatusCondition(conditions []v1alpha1.StorageVersionCondition, // setStatusCondition sets the corresponding condition in conditions to newCondition. // conditions must be non-nil. -// 1. if the condition of the specified type already exists: all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status -// 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset, -// and newCondition is appended +// 1. 
if the condition of the specified type already exists: all fields of the existing condition are updated to +// newCondition, LastTransitionTime is set to now if the new status differs from the old status +// 2. if a condition of the specified type does not exist: LastTransitionTime is set to now() if unset, +// and newCondition is appended +// // NOTE: forceTransition allows overwriting LastTransitionTime even when the status doesn't change. func setStatusCondition(conditions *[]v1alpha1.StorageVersionCondition, newCondition v1alpha1.StorageVersionCondition, forceTransition bool) { diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go index 840d78ea181..fc30ebfd5bb 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/doc.go @@ -52,12 +52,12 @@ limitations under the License. // limit. In the original paper, the partial derivative of R(t) with // respect to t is // -// 1 / NEQ(t) . +// 1 / NEQ(t) . // // To generalize from transmitting one packet at a time to executing C // requests at a time, that derivative becomes // -// C / NEQ(t) . +// C / NEQ(t) . // // However, sometimes there are fewer than C requests available to // execute. For a given queue "q", let us also write "reqs(q, t)" for @@ -70,7 +70,7 @@ limitations under the License. // for server requests: at a particular time t, the partial derivative // of R(t) with respect to t is // -// min( C, sum[over q] reqs(q, t) ) / NEQ(t) . +// min( C, sum[over q] reqs(q, t) ) / NEQ(t) . // // In terms of the implementation outline, this is the rate at which // virtual time is advancing at time t (in virtual nanoseconds per @@ -116,5 +116,4 @@ limitations under the License. // queue’s virtual start time is advanced by G. When a request // finishes being served, and the actual service time was S, the // queue’s virtual start time is decremented by G - S. -// package queueset diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go index 78e0fdee7dd..34cdbaec90a 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go @@ -970,19 +970,19 @@ func TestTimeout(t *testing.T) { // TestContextCancel tests cancellation of a request's context. // The outline is: -// 1. Use a concurrency limit of 1. -// 2. Start request 1. -// 3. Use a fake clock for the following logic, to insulate from scheduler noise. -// 4. The exec fn of request 1 starts request 2, which should wait -// in its queue. -// 5. The exec fn of request 1 also forks a goroutine that waits 1 second -// and then cancels the context of request 2. -// 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case), -// calls `req2.Finish`, which is expected to return after the context cancel. -// 7. The queueset interface allows StartRequest 2 to return `nil` in this situation, -// if the scheduler gets the cancel done before StartRequest finishes; -// the test handles this without regard to whether the implementation will ever do that. -// 8. Check that the above took exactly 1 second. +// 1. Use a concurrency limit of 1. +// 2. Start request 1. +// 3. 
Use a fake clock for the following logic, to insulate from scheduler noise. +// 4. The exec fn of request 1 starts request 2, which should wait +// in its queue. +// 5. The exec fn of request 1 also forks a goroutine that waits 1 second +// and then cancels the context of request 2. +// 6. The exec fn of request 1, if StartRequest 2 returns a req2 (which is the normal case), +// calls `req2.Finish`, which is expected to return after the context cancel. +// 7. The queueset interface allows StartRequest 2 to return `nil` in this situation, +// if the scheduler gets the cancel done before StartRequest finishes; +// the test handles this without regard to whether the implementation will ever do that. +// 8. Check that the above took exactly 1 second. func TestContextCancel(t *testing.T) { metrics.Register() metrics.Reset() diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/eventclock/fake.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/eventclock/fake.go index 018fa826142..f23cae11d4f 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/eventclock/fake.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/eventclock/fake.go @@ -40,7 +40,8 @@ type waitGroupCounter struct { } // compile time assertion that waitGroupCounter meets requirements -// of GoRoutineCounter +// +// of GoRoutineCounter var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil) func (wgc *waitGroupCounter) Add(delta int) { diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go index e0f00f7e4f8..0921794ed77 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -134,7 +134,8 @@ func key(requestInfo *apirequest.RequestInfo) string { } // NOTICE: Keep in sync with shouldDelegateList function in -// staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +// +// staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { resourceVersion := opts.ResourceVersion pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go index e4ebb21459b..287b100cfd8 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/watch_tracker.go @@ -195,8 +195,9 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu // GetInterestedWatchCount implements WatchTracker interface. // // TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't -// contain the Name field set. Figure out if we can somehow get it for the -// more accurate cost estimation. +// +// contain the Name field set. Figure out if we can somehow get it for the +// more accurate cost estimation. // // TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls. 
func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int { diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go index da140b1f0db..fcd953da3cd 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go @@ -38,7 +38,8 @@ func NewDefaultServiceResolver() ServiceResolver { // note that the name, namespace, and port are required and by default all // created addresses use HTTPS scheme. // for example: -// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443 +// +// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443 func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) { if len(name) == 0 || len(namespace) == 0 || port == 0 { return nil, errors.New("cannot resolve an empty service name or namespace or port") diff --git a/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go b/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go index 11474bfffde..09f54a49c74 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go +++ b/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -40,11 +40,10 @@ import ( // // Example client session: // -// CONNECT http://server.com with subprotocol "channel.k8s.io" -// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) -// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT) -// CLOSE -// +// CONNECT http://server.com with subprotocol "channel.k8s.io" +// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) +// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT) +// CLOSE const ChannelWebSocketProtocol = "channel.k8s.io" // The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character @@ -56,11 +55,10 @@ const ChannelWebSocketProtocol = "channel.k8s.io" // // Example client session: // -// CONNECT http://server.com with subprotocol "base64.channel.k8s.io" -// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN) -// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT) -// CLOSE -// +// CONNECT http://server.com with subprotocol "base64.channel.k8s.io" +// WRITE []byte{48, 90, 109, 57, 118, 67, 103, 111, 61} # send "foo\n" (base64: "Zm9vCgo=") on channel '0' (STDIN) +// READ []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT) +// CLOSE const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io" type codecType int diff --git a/staging/src/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go b/staging/src/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go index 9f1e34c4d44..464510ea8f1 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go +++ b/staging/src/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go @@ -75,17 +75,17 @@ func (c *counterRaiser) IncreaseMetricsCounter(req *http.Request) { // NewDeprecatedCertificateRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable within ClientConfig.Wrap. // // It increases the `missingSAN` counter whenever: -// 1. 
we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field` -// which indicates an error caused by the deprecation of Common Name field when veryfing remote -// hostname -// 2. the server certificate in response contains no SAN. This indicates that this binary run -// with the GODEBUG=x509ignoreCN=0 in env +// 1. we get a x509.HostnameError with string `x509: certificate relies on legacy Common Name field` +// which indicates an error caused by the deprecation of Common Name field when veryfing remote +// hostname +// 2. the server certificate in response contains no SAN. This indicates that this binary run +// with the GODEBUG=x509ignoreCN=0 in env // // It increases the `sha1` counter whenever: -// 1. we get a x509.InsecureAlgorithmError with string `SHA1` -// which indicates an error caused by an insecure SHA1 signature -// 2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature. -// This indicates that this binary run with the GODEBUG=x509sha1=1 in env +// 1. we get a x509.InsecureAlgorithmError with string `SHA1` +// which indicates an error caused by an insecure SHA1 signature +// 2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature. +// This indicates that this binary run with the GODEBUG=x509sha1=1 in env func NewDeprecatedCertificateRoundTripperWrapperConstructor(missingSAN, sha1 *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper { return func(rt http.RoundTripper) http.RoundTripper { return &x509DeprecatedCertificateMetricsRTWrapper{ diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go b/staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go index a96d9bea30f..07f263b2e3e 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go @@ -190,10 +190,10 @@ func (b *bufferedBackend) processIncomingEvents(stopCh <-chan struct{}) { // The following things can cause collectEvents to stop and return the list // of events: // -// * Maximum number of events for a batch. -// * Timer has passed. -// * Buffer channel is closed and empty. -// * stopCh is closed. +// - Maximum number of events for a batch. +// - Timer has passed. +// - Buffer channel is closed and empty. +// - stopCh is closed. func (b *bufferedBackend) collectEvents(timer <-chan time.Time, stopCh <-chan struct{}) []*auditinternal.Event { var events []*auditinternal.Event diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go index 0a79502aa64..03f294abdcd 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go @@ -452,18 +452,18 @@ func (r *claimResolver) Verifier(iss string) (*oidc.IDTokenVerifier, error) { // OIDC Connect Core 1.0, section 5.6.2. // See: https://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims // -// { -// ... (other normal claims)... -// "_claim_names": { -// "groups": "src1" -// }, -// "_claim_sources": { -// "src1": { -// "endpoint": "https://www.example.com", -// "access_token": "f005ba11" -// }, -// }, -// } +// { +// ... (other normal claims)... 
+// "_claim_names": { +// "groups": "src1" +// }, +// "_claim_sources": { +// "src1": { +// "endpoint": "https://www.example.com", +// "access_token": "f005ba11" +// }, +// }, +// } func (r *claimResolver) expand(c claims) error { const ( // The claim containing a map of endpoint references per claim. diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index bae5f0e4019..191b3731850 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -76,19 +76,19 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I // New creates a new WebhookAuthorizer from the provided kubeconfig file. // The config's cluster field is used to refer to the remote service, user refers to the returned authorizer. // -// # clusters refers to the remote service. -// clusters: -// - name: name-of-remote-authz-service -// cluster: -// certificate-authority: /path/to/ca.pem # CA for verifying the remote service. -// server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'. +// # clusters refers to the remote service. +// clusters: +// - name: name-of-remote-authz-service +// cluster: +// certificate-authority: /path/to/ca.pem # CA for verifying the remote service. +// server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'. // -// # users refers to the API server's webhook configuration. -// users: -// - name: name-of-api-server -// user: -// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use -// client-key: /path/to/key.pem # key matching the cert +// # users refers to the API server's webhook configuration. +// users: +// - name: name-of-api-server +// user: +// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use +// client-key: /path/to/key.pem # key matching the cert // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. @@ -120,45 +120,45 @@ func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, un // serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is // provided below. // -// { -// "apiVersion": "authorization.k8s.io/v1beta1", -// "kind": "SubjectAccessReview", -// "spec": { -// "resourceAttributes": { -// "namespace": "kittensandponies", -// "verb": "GET", -// "group": "group3", -// "resource": "pods" -// }, -// "user": "jane", -// "group": [ -// "group1", -// "group2" -// ] -// } -// } +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "spec": { +// "resourceAttributes": { +// "namespace": "kittensandponies", +// "verb": "GET", +// "group": "group3", +// "resource": "pods" +// }, +// "user": "jane", +// "group": [ +// "group1", +// "group2" +// ] +// } +// } // // The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or // disallow access. 
A permissive response would return: // -// { -// "apiVersion": "authorization.k8s.io/v1beta1", -// "kind": "SubjectAccessReview", -// "status": { -// "allowed": true -// } -// } +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": true +// } +// } // // To disallow access, the remote service would return: // -// { -// "apiVersion": "authorization.k8s.io/v1beta1", -// "kind": "SubjectAccessReview", -// "status": { -// "allowed": false, -// "reason": "user does not have read access to the namespace" -// } -// } +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": false, +// "reason": "user does not have read access to the namespace" +// } +// } // // TODO(mikedanese): We should eventually support failing closed when we // encounter an error. We are failing open now to preserve backwards compatible @@ -246,7 +246,7 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri } -//TODO: need to finish the method to get the rules when using webhook mode +// TODO: need to finish the method to get the rules when using webhook mode func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { var ( resourceRules []authorizer.ResourceRuleInfo diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go index 0eea122fe67..ed47e99942f 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go @@ -40,7 +40,8 @@ type CommandHeaderRoundTripper struct { // CommandHeaderRoundTripper adds Request headers before delegating to standard // round tripper. These headers are kubectl command headers which // detail the kubectl command. See SIG CLI KEP 859: -// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +// +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers func (c *CommandHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { for header, value := range c.Headers { req.Header.Set(header, value) @@ -50,7 +51,9 @@ func (c *CommandHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response // ParseCommandHeaders fills in a map of custom headers into the CommandHeaderRoundTripper. These // headers are then filled into each request. For details on the custom headers see: -// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +// +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +// // Each call overwrites the previously parsed command headers (not additive). // TODO(seans3): Parse/add flags removing PII from flag values. func (c *CommandHeaderRoundTripper) ParseCommandHeaders(cmd *cobra.Command, args []string) { diff --git a/staging/src/k8s.io/client-go/applyconfigurations/doc.go b/staging/src/k8s.io/client-go/applyconfigurations/doc.go index 48fdd660a06..afd1946927e 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/doc.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/doc.go @@ -20,22 +20,22 @@ limitations under the License. 
Package applyconfigurations provides typesafe go representations of the apply configurations that are used to constructs Server-side Apply requests. -Basics +# Basics The Apply functions in the typed client (see the k8s.io/client-go/kubernetes/typed packages) offer a direct and typesafe way of calling Server-side Apply. Each Apply function takes an "apply configuration" type as an argument, which is a structured representation of an Apply request. For example: - import ( - ... - v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1" - ) - hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns). - WithSpec(v1ac.HorizontalPodAutoscalerSpec(). - WithMinReplicas(0) - ) - return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true}) + import ( + ... + v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1" + ) + hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns). + WithSpec(v1ac.HorizontalPodAutoscalerSpec(). + WithMinReplicas(0) + ) + return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true}) Note in this example that HorizontalPodAutoscaler is imported from an "applyconfigurations" package. Each "apply configuration" type represents the same Kubernetes object kind as the @@ -43,46 +43,46 @@ corresponding go struct, but where all fields are pointers to make them optional requests to be accurately represented. For example, this when the apply configuration in the above example is marshalled to YAML, it produces: - apiVersion: autoscaling/v1 - kind: HorizontalPodAutoscaler - metadata: - name: myHPA - namespace: myNamespace - spec: - minReplicas: 0 + apiVersion: autoscaling/v1 + kind: HorizontalPodAutoscaler + metadata: + name: myHPA + namespace: myNamespace + spec: + minReplicas: 0 To understand why this is needed, the above YAML cannot be produced by the v1.HorizontalPodAutoscaler go struct. Take for example: - hpa := v1.HorizontalPodAutoscaler{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "autoscaling/v1", - Kind: "HorizontalPodAutoscaler", - }, - ObjectMeta: ObjectMeta{ - Namespace: ns, - Name: autoscalerName, - }, - Spec: v1.HorizontalPodAutoscalerSpec{ - MinReplicas: pointer.Int32Ptr(0), - }, - } + hpa := v1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: ObjectMeta{ + Namespace: ns, + Name: autoscalerName, + }, + Spec: v1.HorizontalPodAutoscalerSpec{ + MinReplicas: pointer.Int32Ptr(0), + }, + } The above code attempts to declare the same apply configuration as shown in the previous examples, but when marshalled to YAML, produces: - kind: HorizontalPodAutoscaler - apiVersion: autoscaling/v1 - metadata: - name: myHPA - namespace: myNamespace - creationTimestamp: null - spec: - scaleTargetRef: - kind: "" - name: "" - minReplicas: 0 - maxReplicas: 0 + kind: HorizontalPodAutoscaler + apiVersion: autoscaling/v1 + metadata: + name: myHPA + namespace: myNamespace + creationTimestamp: null + spec: + scaleTargetRef: + kind: "" + name: "" + minReplicas: 0 + maxReplicas: 0 Which, among other things, contains spec.maxReplicas set to 0. This is almost certainly not what the caller intended (the intended apply configuration says nothing about the maxReplicas field), @@ -102,7 +102,7 @@ general purpose library. 
In addition to the convenience, the With functions also developers from the underlying representation, which makes it safer for the underlying representation to be changed to support additional features in the future. -Controller Support +# Controller Support The new client-go support makes it much easier to use Server-side Apply in controllers, by either of two mechanisms. @@ -130,24 +130,24 @@ accidentally deleted. For such cases, an alternative to mechanism 1 is to replac reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with a "extract/modify-in-place/apply" workflow. Here's an example of the new workflow: - fieldMgr := "my-field-manager" - deploymentClient := clientset.AppsV1().Deployments("default") - // read, could also be read from a shared informer - deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{}) - if err != nil { - // handle error - } - // extract - deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr) - if err != nil { - // handle error - } - // modify-in-place - deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container(). - WithName("modify-slice"). - WithImage("nginx:1.14.2"), - ) - // apply - applied, err := deploymentClient.Apply(ctx, extractedDeployment, metav1.ApplyOptions{FieldManager: fieldMgr}) + fieldMgr := "my-field-manager" + deploymentClient := clientset.AppsV1().Deployments("default") + // read, could also be read from a shared informer + deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{}) + if err != nil { + // handle error + } + // extract + deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr) + if err != nil { + // handle error + } + // modify-in-place + deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container(). + WithName("modify-slice"). + WithImage("nginx:1.14.2"), + ) + // apply + applied, err := deploymentClient.Apply(ctx, extractedDeployment, metav1.ApplyOptions{FieldManager: fieldMgr}) */ package applyconfigurations diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/register.go b/staging/src/k8s.io/client-go/kubernetes/fake/register.go index c3f0a3d5225..7c2f1c11a9d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/register.go @@ -125,14 +125,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
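The AddToScheme comment reformatted above describes composing clientset schemes. As a minimal illustrative sketch only (assuming a module that depends on both client-go and kube-aggregator, with the import paths taken verbatim from that comment), the composition can be exercised like this:

    package main

    import (
        "fmt"

        clientsetscheme "k8s.io/client-go/kubernetes/scheme"
        aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
    )

    func main() {
        // Register the kube-aggregator types into the shared client-go scheme so
        // that RawExtensions carrying aggregator objects serialize and decode
        // correctly, as the doc comment above explains.
        if err := aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme); err != nil {
            panic(fmt.Errorf("adding aggregator types to scheme: %w", err))
        }
        fmt.Println("aggregator types registered into the client-go scheme")
    }
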
diff --git a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go index b41466151d4..24046233b20 100644 --- a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go @@ -125,14 +125,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 7213193bf1b..464fff91167 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,7 +82,8 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// have been created with the "" namespace. +// +// have been created with the "" namespace. func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/staging/src/k8s.io/client-go/rest/client.go b/staging/src/k8s.io/client-go/rest/client.go index c969300494c..2cf821bcd7a 100644 --- a/staging/src/k8s.io/client-go/rest/client.go +++ b/staging/src/k8s.io/client-go/rest/client.go @@ -52,7 +52,8 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header @@ -159,13 +160,14 @@ func readExpBackoffConfig() BackoffManager { // c, err := NewRESTClient(...) // if err != nil { ... } // resp, err := c.Verb("GET"). -// Path("pods"). -// SelectorParam("labels", "area=staging"). -// Timeout(10*time.Second). -// Do() +// +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// // if err != nil { ... 
} // list, ok := resp.(*api.PodList) -// func (c *RESTClient) Verb(verb string) *Request { return NewRequest(c).Verb(verb) } diff --git a/staging/src/k8s.io/client-go/rest/plugin.go b/staging/src/k8s.io/client-go/rest/plugin.go index c2b3dfc0f5e..ae5cbdc2c4c 100644 --- a/staging/src/k8s.io/client-go/rest/plugin.go +++ b/staging/src/k8s.io/client-go/rest/plugin.go @@ -36,9 +36,10 @@ type AuthProvider interface { } // Factory generates an AuthProvider plugin. -// clusterAddress is the address of the current cluster. -// config is the initial configuration for this plugin. -// persister allows the plugin to save updated configuration. +// +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) // AuthProviderConfigPersister allows a plugin to persist configuration info diff --git a/staging/src/k8s.io/client-go/rest/request.go b/staging/src/k8s.io/client-go/rest/request.go index bfc6ac64a56..dba933f7d6f 100644 --- a/staging/src/k8s.io/client-go/rest/request.go +++ b/staging/src/k8s.io/client-go/rest/request.go @@ -917,8 +917,8 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp // processing. // // Error type: -// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError -// * http.Client.Do errors are returned directly. +// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// - http.Client.Do errors are returned directly. func (r *Request) Do(ctx context.Context) Result { var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { @@ -1085,15 +1085,15 @@ const maxUnstructuredResponseTextBytes = 2048 // unexpected responses. The rough structure is: // // 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes -// - this is the happy path -// - when you get this output, trust what the server sends -// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to -// generate a reasonable facsimile of the original failure. -// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above -// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error -// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected -// initial contact, the presence of mismatched body contents from posted content types -// - Give these a separate distinct error type and capture as much as possible of the original message +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. 
Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message // // TODO: introduce transformation of generic http.Client.Do() errors that separates 4. func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { diff --git a/staging/src/k8s.io/client-go/rest/warnings.go b/staging/src/k8s.io/client-go/rest/warnings.go index 18476f5ff90..ad493659f22 100644 --- a/staging/src/k8s.io/client-go/rest/warnings.go +++ b/staging/src/k8s.io/client-go/rest/warnings.go @@ -40,9 +40,9 @@ var ( // SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered. // By default, warnings are logged. Several built-in implementations are provided: -// - NoWarnings suppresses warnings. -// - WarningLogger logs warnings. -// - NewWarningWriter() outputs warnings to the provided writer. +// - NoWarnings suppresses warnings. +// - WarningLogger logs warnings. +// - NewWarningWriter() outputs warnings to the provided writer. func SetDefaultWarningHandler(l WarningHandler) { defaultWarningHandlerLock.Lock() defer defaultWarningHandlerLock.Unlock() diff --git a/staging/src/k8s.io/client-go/tools/auth/clientauth.go b/staging/src/k8s.io/client-go/tools/auth/clientauth.go index 4c24f79977f..ac30271fe0b 100644 --- a/staging/src/k8s.io/client-go/tools/auth/clientauth.go +++ b/staging/src/k8s.io/client-go/tools/auth/clientauth.go @@ -45,20 +45,20 @@ client.Client from an authcfg.Info. Example: - import ( - "pkg/client" - "pkg/client/auth" - ) + import ( + "pkg/client" + "pkg/client/auth" + ) - info, err := auth.LoadFromFile(filename) - if err != nil { - // handle error - } - clientConfig = client.Config{} - clientConfig.Host = "example.com:4901" - clientConfig = info.MergeWithConfig() - client := client.New(clientConfig) - client.Pods(ns).List() + info, err := auth.LoadFromFile(filename) + if err != nil { + // handle error + } + clientConfig = client.Config{} + clientConfig.Host = "example.com:4901" + clientConfig = info.MergeWithConfig() + client := client.New(clientConfig) + client.Pods(ns).List() */ package auth diff --git a/staging/src/k8s.io/client-go/tools/cache/controller.go b/staging/src/k8s.io/client-go/tools/cache/controller.go index ff4c22de00b..0762da3befa 100644 --- a/staging/src/k8s.io/client-go/tools/cache/controller.go +++ b/staging/src/k8s.io/client-go/tools/cache/controller.go @@ -199,17 +199,17 @@ func (c *controller) processLoop() { // can't return an error. The handlers MUST NOT modify the objects // received; this concerns not only the top level of structure but all // the data structures reachable from it. -// * OnAdd is called when an object is added. -// * OnUpdate is called when an object is modified. Note that oldObj is the -// last known state of the object-- it is possible that several changes -// were combined together, so you can't use this to see every single -// change. OnUpdate is also called when a re-list happens, and it will -// get called even if nothing changed. This is useful for periodically -// evaluating or syncing something. -// * OnDelete will get the final state of the item if it is known, otherwise -// it will get an object of type DeletedFinalStateUnknown. 
This can -// happen if the watch is closed and misses the delete event and we don't -// notice the deletion until the subsequent re-list. +// - OnAdd is called when an object is added. +// - OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// - OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. type ResourceEventHandler interface { OnAdd(obj interface{}) OnUpdate(oldObj, newObj interface{}) @@ -305,15 +305,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { // notifications to be faulty. // // Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -332,16 +331,15 @@ func NewInformer( // notifications to be faulty. // // Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// * indexers is the indexer for the received object type. -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. +// - indexers is the indexer for the received object type. func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -454,16 +452,15 @@ func processDeltas( // providing event notifications. 
// // Parameters -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// * clientState is the store you want to populate -// +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. +// - clientState is the store you want to populate func newInformer( lw ListerWatcher, objType runtime.Object, diff --git a/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go b/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go index 2da2933ab74..0c13a41f065 100644 --- a/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go @@ -74,11 +74,11 @@ type DeltaFIFOOptions struct { // the Pop() method. // // DeltaFIFO solves this use case: -// * You want to process every object change (delta) at most once. -// * When you process an object, you want to see everything -// that's happened to it since you last processed it. -// * You want to process the deletion of some of the objects. -// * You might want to periodically reprocess objects. +// - You want to process every object change (delta) at most once. +// - When you process an object, you want to see everything +// that's happened to it since you last processed it. +// - You want to process the deletion of some of the objects. +// - You might want to periodically reprocess objects. // // DeltaFIFO's Pop(), Get(), and GetByKey() methods return // interface{} to satisfy the Store/Queue interfaces, but they @@ -179,21 +179,21 @@ type Deltas []Delta // "known" keys when Pop() is called. Have to think about how that // affects error retrying. // -// NOTE: It is possible to misuse this and cause a race when using an -// external known object source. -// Whether there is a potential race depends on how the consumer -// modifies knownObjects. In Pop(), process function is called under -// lock, so it is safe to update data structures in it that need to be -// in sync with the queue (e.g. knownObjects). +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the consumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). // -// Example: -// In case of sharedIndexInformer being a consumer -// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), -// there is no race as knownObjects (s.indexer) is modified safely -// under DeltaFIFO's lock. 
The only exceptions are GetStore() and -// GetIndexer() methods, which expose ways to modify the underlying -// storage. Currently these two methods are used for creating Lister -// and internal tests. +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. // // Also see the comment on DeltaFIFO. // diff --git a/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go b/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go index 7abdae73742..3f272b80b14 100644 --- a/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go @@ -25,13 +25,14 @@ import ( ) // ExpirationCache implements the store interface -// 1. All entries are automatically time stamped on insert -// a. The key is computed based off the original item/keyFunc -// b. The value inserted under that key is the timestamped item -// 2. Expiration happens lazily on read based on the expiration policy -// a. No item can be inserted into the store while we're expiring -// *any* item in the cache. -// 3. Time-stamps are stripped off unexpired entries before return +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// // Note that the ExpirationCache is inherently slower than a normal // threadSafeStore because it takes a write lock every time it checks if // an item has expired. diff --git a/staging/src/k8s.io/client-go/tools/cache/fifo.go b/staging/src/k8s.io/client-go/tools/cache/fifo.go index 5c9255027a0..8f3313783d5 100644 --- a/staging/src/k8s.io/client-go/tools/cache/fifo.go +++ b/staging/src/k8s.io/client-go/tools/cache/fifo.go @@ -103,10 +103,11 @@ func Pop(queue Queue) interface{} { // recent version will be processed. This can't be done with a channel // // FIFO solves this use case: -// * You want to process every object (exactly) once. -// * You want to process the most recent version of the object when you process it. -// * You do not want to process deleted objects, they should be removed from the queue. -// * You do not want to periodically reprocess objects. +// - You want to process every object (exactly) once. +// - You want to process the most recent version of the object when you process it. +// - You do not want to process deleted objects, they should be removed from the queue. +// - You do not want to periodically reprocess objects. +// // Compare with DeltaFIFO for other use cases. type FIFO struct { lock sync.RWMutex diff --git a/staging/src/k8s.io/client-go/tools/cache/index.go b/staging/src/k8s.io/client-go/tools/cache/index.go index c6af49d8c3a..b78d3086b8c 100644 --- a/staging/src/k8s.io/client-go/tools/cache/index.go +++ b/staging/src/k8s.io/client-go/tools/cache/index.go @@ -28,10 +28,10 @@ import ( // Delete). 
// // There are three kinds of strings here: -// 1. a storage key, as defined in the Store interface, -// 2. a name of an index, and -// 3. an "indexed value", which is produced by an IndexFunc and -// can be a field value or any other string computed from the object. +// 1. a storage key, as defined in the Store interface, +// 2. a name of an index, and +// 3. an "indexed value", which is produced by an IndexFunc and +// can be a field value or any other string computed from the object. type Indexer interface { Store // Index returns the stored objects whose set of indexed values diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/loader.go b/staging/src/k8s.io/client-go/tools/clientcmd/loader.go index 78bd9ed8d5c..4e301332d64 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/loader.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/loader.go @@ -160,8 +160,10 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { // Load starts by running the MigrationRules and then // takes the loading rules and returns a Config object based on following rules. -// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice +// +// if the ExplicitPath, return the unmerged explicit file +// Otherwise, return a merged config based on the Precedence slice +// // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. // Read errors or files with non-deserializable content produce errors. // The first file to set a particular map key wins and map key's value is never changed. diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go index 03a13e6b63f..c64ba9b26b0 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -161,7 +161,7 @@ type LeaderElectionConfig struct { // lifecycle events of the LeaderElector. These are invoked asynchronously. 
// // possible future callbacks: -// * OnChallenge() +// - OnChallenge() type LeaderCallbacks struct { // OnStartedLeading is called when a LeaderElector client starts leading OnStartedLeading func(context.Context) diff --git a/staging/src/k8s.io/client-go/tools/portforward/portforward.go b/staging/src/k8s.io/client-go/tools/portforward/portforward.go index 6f1d12b66bf..9e4da91cff1 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/portforward.go +++ b/staging/src/k8s.io/client-go/tools/portforward/portforward.go @@ -62,18 +62,18 @@ type ForwardedPort struct { } /* - valid port specifications: +valid port specifications: - 5000 - - forwards from localhost:5000 to pod:5000 +5000 +- forwards from localhost:5000 to pod:5000 - 8888:5000 - - forwards from localhost:8888 to pod:5000 +8888:5000 +- forwards from localhost:8888 to pod:5000 - 0:5000 - :5000 - - selects a random available local port, - forwards from localhost: to pod:5000 +0:5000 +:5000 + - selects a random available local port, + forwards from localhost: to pod:5000 */ func parsePorts(ports []string) ([]ForwardedPort, error) { var forwards []ForwardedPort diff --git a/staging/src/k8s.io/client-go/tools/record/events_cache.go b/staging/src/k8s.io/client-go/tools/record/events_cache.go index 4f041e8fd26..abba06362aa 100644 --- a/staging/src/k8s.io/client-go/tools/record/events_cache.go +++ b/staging/src/k8s.io/client-go/tools/record/events_cache.go @@ -235,10 +235,10 @@ type aggregateRecord struct { // EventAggregate checks if a similar event has been seen according to the // aggregation configuration (max events, max interval, etc) and returns: // -// - The (potentially modified) event that should be created -// - The cache key for the event, for correlation purposes. This will be set to -// the full key for normal events, and to the result of -// EventAggregatorMessageFunc for aggregate events. +// - The (potentially modified) event that should be created +// - The cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { now := metav1.NewTime(e.clock.Now()) var record aggregateRecord @@ -427,14 +427,14 @@ type EventCorrelateResult struct { // prior to interacting with the API server to record the event. // // The default behavior is as follows: -// * Aggregation is performed if a similar event is recorded 10 times +// - Aggregation is performed if a similar event is recorded 10 times // in a 10 minute rolling interval. A similar event is an event that varies only by // the Event.Message field. Rather than recording the precise event, aggregation // will create a new event whose message reports that it has combined events with // the same reason. -// * Events are incrementally counted if the exact same event is encountered multiple +// - Events are incrementally counted if the exact same event is encountered multiple // times. -// * A source may burst 25 events about an object, but has a refill rate budget +// - A source may burst 25 events about an object, but has a refill rate budget // per object of 1 event every 5 minutes to control long-tail of spam. 
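The burst-and-refill budget described above is a token bucket. As a rough, illustrative sketch only (not how NewEventCorrelator is wired internally), the same per-object budget can be expressed with client-go's flowcontrol package; the 25-event burst and the 1-event-per-5-minutes refill are the figures quoted above, everything else here is assumed for the example:

	package main

	import (
		"fmt"

		"k8s.io/client-go/util/flowcontrol"
	)

	func main() {
		// Refill one token every 5 minutes (1/300 QPS) with a burst of 25,
		// mirroring the per-object spam budget described above.
		limiter := flowcontrol.NewTokenBucketRateLimiter(1.0/300.0, 25)

		dropped := 0
		for i := 0; i < 30; i++ {
			if !limiter.TryAccept() {
				dropped++
			}
		}
		// Starting from a full bucket, the first 25 events are accepted
		// and the remaining 5 are dropped.
		fmt.Printf("dropped %d of 30 events\n", dropped)
	}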
func NewEventCorrelator(clock clock.PassiveClock) *EventCorrelator { cacheSize := maxLruCacheEntries diff --git a/staging/src/k8s.io/client-go/tools/watch/until.go b/staging/src/k8s.io/client-go/tools/watch/until.go index bf74837d362..81d4ff0ddff 100644 --- a/staging/src/k8s.io/client-go/tools/watch/until.go +++ b/staging/src/k8s.io/client-go/tools/watch/until.go @@ -101,7 +101,9 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions // It guarantees you to see all events and in the order they happened. // Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. // (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing -// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// // The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { w, err := NewRetryWatcher(initialResourceVersion, watcherClient) diff --git a/staging/src/k8s.io/client-go/util/jsonpath/parser.go b/staging/src/k8s.io/client-go/util/jsonpath/parser.go index b84016a9f9c..40bab188dcd 100644 --- a/staging/src/k8s.io/client-go/util/jsonpath/parser.go +++ b/staging/src/k8s.io/client-go/util/jsonpath/parser.go @@ -478,7 +478,7 @@ func isBool(s string) bool { return s == "true" || s == "false" } -//UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string +// UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string func UnquoteExtend(s string) (string, error) { n := len(s) if n < 2 { diff --git a/staging/src/k8s.io/client-go/util/retry/util.go b/staging/src/k8s.io/client-go/util/retry/util.go index 772f5bd7a77..0c6e504a6dc 100644 --- a/staging/src/k8s.io/client-go/util/retry/util.go +++ b/staging/src/k8s.io/client-go/util/retry/util.go @@ -74,30 +74,30 @@ func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) // backoff, and then try again. On a non-"Conflict" error, or if it retries too many times // and gives up, RetryOnConflict will return an error to the caller. // -// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { -// // Fetch the resource here; you need to refetch it on every try, since -// // if you got a conflict on the last update attempt then you need to get -// // the current version before making your own changes. -// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) -// if err != nil { -// return err -// } +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. 
+// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err != nil { +// return err +// } // -// // Make whatever updates to the resource are needed -// pod.Status.Phase = v1.PodFailed +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed // -// // Try to update -// _, err = c.Pods("mynamespace").UpdateStatus(pod) -// // You have to return err itself here (not wrapped inside another error) -// // so that RetryOnConflict can identify it correctly. -// return err -// }) -// if err != nil { -// // May be conflict if max retries were hit, or may be something unrelated -// // like permissions or a network error -// return err -// } -// ... +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... // // TODO: Make Backoff an interface? func RetryOnConflict(backoff wait.Backoff, fn func() error) error { diff --git a/staging/src/k8s.io/client-go/util/testing/fake_openapi_handler.go b/staging/src/k8s.io/client-go/util/testing/fake_openapi_handler.go index 4682ac9b7f2..9c0c0443db3 100644 --- a/staging/src/k8s.io/client-go/util/testing/fake_openapi_handler.go +++ b/staging/src/k8s.io/client-go/util/testing/fake_openapi_handler.go @@ -42,8 +42,9 @@ type FakeOpenAPIServer struct { // API server. // // specsPath - Give a path to some test data organized so that each GroupVersion -// has its own OpenAPI V3 JSON file. -// i.e. apps/v1beta1 is stored in /apps/v1beta1.json +// +// has its own OpenAPI V3 JSON file. +// i.e. apps/v1beta1 is stored in /apps/v1beta1.json func NewFakeOpenAPIV3Server(specsPath string) (*FakeOpenAPIServer, error) { mux := &testMux{ counts: map[string]int{}, diff --git a/staging/src/k8s.io/client-go/util/workqueue/doc.go b/staging/src/k8s.io/client-go/util/workqueue/doc.go index a5c976e0f9c..8555aa95fe1 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/doc.go +++ b/staging/src/k8s.io/client-go/util/workqueue/doc.go @@ -16,11 +16,11 @@ limitations under the License. // Package workqueue provides a simple queue that supports the following // features: -// * Fair: items processed in the order in which they are added. -// * Stingy: a single item will not be processed multiple times concurrently, -// and if an item is added multiple times before it can be processed, it -// will only be processed once. -// * Multiple consumers and producers. In particular, it is allowed for an -// item to be reenqueued while it is being processed. -// * Shutdown notifications. +// - Fair: items processed in the order in which they are added. +// - Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// - Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// - Shutdown notifications. 
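A minimal usage sketch of the queue behaviour listed in this package comment; the item names are made up, and only the package's basic interface (New, Add, Get, Done, ShutDown) is exercised:

	package main

	import (
		"fmt"
		"sync"

		"k8s.io/client-go/util/workqueue"
	)

	func main() {
		q := workqueue.New()

		q.Add("node-1")
		q.Add("node-1") // still queued, so this add is coalesced; "node-1" is processed once
		q.Add("node-2")
		q.ShutDown() // items already queued are still handed out before Get reports shutdown

		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				item, shutdown := q.Get()
				if shutdown {
					return
				}
				fmt.Println("processing", item)
				q.Done(item) // Done must be called so the item may be queued again later
			}
		}()
		wg.Wait()
	}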
package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/staging/src/k8s.io/cloud-provider/app/core.go b/staging/src/k8s.io/cloud-provider/app/core.go index fb6b6b46770..98e68b1cb25 100644 --- a/staging/src/k8s.io/cloud-provider/app/core.go +++ b/staging/src/k8s.io/cloud-provider/app/core.go @@ -17,7 +17,6 @@ limitations under the License. // Package app implements a server that runs a set of active // components. This includes node controllers, service and // route controller, and so on. -// package app import ( diff --git a/staging/src/k8s.io/cloud-provider/app/testing/testserver.go b/staging/src/k8s.io/cloud-provider/app/testing/testserver.go index fc849e2d5a0..a0a86150886 100644 --- a/staging/src/k8s.io/cloud-provider/app/testing/testserver.go +++ b/staging/src/k8s.io/cloud-provider/app/testing/testserver.go @@ -60,8 +60,9 @@ type Logger interface { // and location of the tmpdir are returned. // // Note: we return a tear-down func instead of a stop channel because the later will leak temporary -// files that because Golang testing's call to os.Exit will not give a stop channel go routine -// enough time to remove temporary files. +// +// files that because Golang testing's call to os.Exit will not give a stop channel go routine +// enough time to remove temporary files. func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) { stopCh := make(chan struct{}) var errCh chan error diff --git a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go index f60dcc17663..f0b29241970 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go @@ -49,9 +49,9 @@ import ( // labelReconcileInfo lists Node labels to reconcile, and how to reconcile them. // primaryKey and secondaryKey are keys of labels to reconcile. // - If both keys exist, but their values don't match. Use the value from the -// primaryKey as the source of truth to reconcile. +// primaryKey as the source of truth to reconcile. // - If ensureSecondaryExists is true, and the secondaryKey does not -// exist, secondaryKey will be added with the value of the primaryKey. +// exist, secondaryKey will be added with the value of the primaryKey. var labelReconcileInfo = []struct { primaryKey string secondaryKey string diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go index 866183aeed6..d1a998b6be2 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go +++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go @@ -64,7 +64,7 @@ func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Serv } } -//Wrap newService so that you don't have to call default arguments again and again. +// Wrap newService so that you don't have to call default arguments again and again. 
func defaultExternalService() *v1.Service { return newService("external-balancer", types.UID("123"), v1.ServiceTypeLoadBalancer) } @@ -996,14 +996,15 @@ func TestProcessServiceDeletion(t *testing.T) { // Test cases: // index finalizer timestamp wantLB | clean-up -// 0 0 0 0 | false (No finalizer, no clean up) -// 1 0 0 1 | false (Ignored as same with case 0) -// 2 0 1 0 | false (Ignored as same with case 0) -// 3 0 1 1 | false (Ignored as same with case 0) -// 4 1 0 0 | true -// 5 1 0 1 | false -// 6 1 1 0 | true (Service is deleted, needs clean up) -// 7 1 1 1 | true (Ignored as same with case 6) +// +// 0 0 0 0 | false (No finalizer, no clean up) +// 1 0 0 1 | false (Ignored as same with case 0) +// 2 0 1 0 | false (Ignored as same with case 0) +// 3 0 1 1 | false (Ignored as same with case 0) +// 4 1 0 0 | true +// 5 1 0 1 | false +// 6 1 1 0 | true (Service is deleted, needs clean up) +// 7 1 1 1 | true (Ignored as same with case 6) func TestNeedsCleanup(t *testing.T) { testCases := []struct { desc string @@ -1217,10 +1218,10 @@ func TestNeedsUpdate(t *testing.T) { } } -//All the test cases for ServiceCache uses a single cache, these below test cases should be run in order, -//as tc1 (addCache would add elements to the cache) -//and tc2 (delCache would remove element from the cache without it adding automatically) -//Please keep this in mind while adding new test cases. +// All the test cases for ServiceCache uses a single cache, these below test cases should be run in order, +// as tc1 (addCache would add elements to the cache) +// and tc2 (delCache would remove element from the cache without it adding automatically) +// Please keep this in mind while adding new test cases. func TestServiceCache(t *testing.T) { //ServiceCache a common service cache for all the test cases @@ -1327,7 +1328,7 @@ func TestServiceCache(t *testing.T) { } } -//Test a utility functions as it's not easy to unit test nodeSyncInternal directly +// Test a utility functions as it's not easy to unit test nodeSyncInternal directly func TestNodeSlicesEqualForLB(t *testing.T) { numNodes := 10 nArray := make([]*v1.Node, numNodes) diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/address.go b/staging/src/k8s.io/cloud-provider/node/helpers/address.go index 23405f61a37..6eb44a90feb 100644 --- a/staging/src/k8s.io/cloud-provider/node/helpers/address.go +++ b/staging/src/k8s.io/cloud-provider/node/helpers/address.go @@ -51,10 +51,10 @@ func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddr // // If nodeIP is a specific IP, either IPv4 or IPv6, we will return node // addresses filtered such that: -// * Any address matching nodeIP will be listed first. -// * If nodeIP matches an address of a particular type (internal or external), -// that will be the *only* address of that type returned. -// * All remaining addresses are listed after. +// - Any address matching nodeIP will be listed first. +// - If nodeIP matches an address of a particular type (internal or external), +// that will be the *only* address of that type returned. +// - All remaining addresses are listed after. 
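A small usage sketch of the ordering rules listed above, calling PreferNodeIP with the signature shown in this hunk; the addresses are invented for illustration. With nodeIP matching the first InternalIP, that address should be returned first and the second InternalIP dropped, while the ExternalIP is kept after it:

	package main

	import (
		"fmt"
		"net"

		v1 "k8s.io/api/core/v1"
		nodehelpers "k8s.io/cloud-provider/node/helpers"
	)

	func main() {
		cloudAddrs := []v1.NodeAddress{
			{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
			{Type: v1.NodeInternalIP, Address: "10.0.0.6"},
			{Type: v1.NodeExternalIP, Address: "203.0.113.10"},
		}

		// nodeIP matches the first InternalIP, so it is listed first and becomes
		// the only InternalIP returned; the ExternalIP follows.
		addrs, err := nodehelpers.PreferNodeIP(net.ParseIP("10.0.0.5"), cloudAddrs)
		if err != nil {
			panic(err)
		}
		fmt.Println(addrs)
	}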
func PreferNodeIP(nodeIP net.IP, cloudNodeAddresses []v1.NodeAddress) ([]v1.NodeAddress, error) { // If nodeIP is unset, just use the addresses provided by the cloud provider as-is if nodeIP == nil { diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go index b5736c40db5..e74de077629 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go @@ -105,7 +105,6 @@ var resultTypeSupportedVerbs = []string{ // The 'input' is the input type used for creation (function argument). // The 'result' (not needed in this case) is the result type returned from the // client function. -// type extension struct { // VerbName is the name of the custom verb (Scale, Instantiate, etc..) VerbName string diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index feba7d02903..a4687e199e8 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -28,7 +28,9 @@ limitations under the License. // that efficiently convert between same-name types in the two // (internal, external) packages. The generated functions include // ones named -// autoConvert___To__ +// +// autoConvert___To__ +// // for each such pair of types --- both with (pkg1,pkg2) = // (internal,external) and (pkg1,pkg2) = (external,internal). The // generated conversion functions recurse on the structure of the data @@ -43,7 +45,9 @@ limitations under the License. // // For each pair of types `conversion-gen` will also generate a // function named -// Convert___To__ +// +// Convert___To__ +// // if both of two conditions are met: (1) the destination package does // not contain a function of that name in a non-generated file and (2) // the generation of the corresponding autoConvert_... function did @@ -65,12 +69,16 @@ limitations under the License. // package's `doc.go` file (currently anywhere in that file is // acceptable, but the recommended location is above the `package` // statement), of the form: -// // +k8s:conversion-gen= +// +// // +k8s:conversion-gen= +// // This introduces a conversion task, for which the destination // package is the one containing the file with the tag and the tag // identifies a package containing internal types. If there is also a // tag of the form -// // +k8s:conversion-gen-external-types= +// +// // +k8s:conversion-gen-external-types= +// // then it identifies the package containing the external types; // otherwise they are in the destination package. // @@ -82,7 +90,8 @@ limitations under the License. // // When generating for a package, individual types or fields of structs may opt // out of Conversion generation by specifying a comment on the of the form: -// // +k8s:conversion-gen=false +// +// // +k8s:conversion-gen=false package main import ( diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 888c2e306a9..16df95fd144 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -28,15 +28,18 @@ limitations under the License. // Generation is governed by comment tags in the source. 
Any package may // request DeepCopy generation by including a comment in the file-comments of // one file, of the form: -// // +k8s:deepcopy-gen=package +// +// // +k8s:deepcopy-gen=package // // DeepCopy functions can be generated for individual types, rather than the // entire package by specifying a comment on the type definion of the form: -// // +k8s:deepcopy-gen=true +// +// // +k8s:deepcopy-gen=true // // When generating for a whole package, individual types may opt out of // DeepCopy generation by specifying a comment on the of the form: -// // +k8s:deepcopy-gen=false +// +// // +k8s:deepcopy-gen=false // // Note that registration is a whole-package option, and is not available for // individual types. diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index f050a132788..47967520aee 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -24,18 +24,18 @@ limitations under the License. // request defaulter generation by including one or more comment tags at // the package comment level: // -// // +k8s:defaulter-gen= +// // +k8s:defaulter-gen= // // which will create defaulters for any type that contains the provided // field name (if the type has defaulters). Any type may request explicit // defaulting by providing the comment tag: // -// // +k8s:defaulter-gen=true|false +// // +k8s:defaulter-gen=true|false // // An existing defaulter method (`SetDefaults_TYPE`) can provide the // comment tag: // -// // +k8s:defaulter-gen=covers +// // +k8s:defaulter-gen=covers // // to indicate that the defaulter does not or should not call any nested // defaulters. diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go index b3c8d2e8728..c0238be8c28 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go @@ -110,7 +110,8 @@ func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, option // as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and // properly discriminate between empty and nil (which is not possible in protobuf). // TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181 -// has agreement +// +// has agreement func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) { switch t := decl.(type) { case *ast.FuncDecl: diff --git a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/main.go b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/main.go index eb976f86f61..f722a9d113e 100644 --- a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/main.go @@ -22,7 +22,8 @@ limitations under the License. // Generation is governed by comment tags in the source. 
Any package may // request Status generation by including a comment in the file-comments of // one file, of the form: -// // +k8s:prerelease-lifecycle-gen=true +// +// // +k8s:prerelease-lifecycle-gen=true // // // +k8s:prerelease-lifecycle-gen:introduced=1.19 // // +k8s:prerelease-lifecycle-gen:deprecated=1.22 diff --git a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go index 4456ca4f895..991b122f5f6 100644 --- a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go +++ b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go @@ -315,7 +315,8 @@ func (g *genPreleaseLifecycle) Filter(c *generator.Context, t *types.Type) bool // versionMethod returns the signature of an () method, nil or an error // if the type is wrong. Introduced() allows more efficient deep copy // implementations to be defined by the type's author. The correct signature -// func (t *T) () string +// +// func (t *T) () string func versionMethod(methodName string, t *types.Type) (*types.Signature, error) { f, found := t.Methods[methodName] if !found { diff --git a/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/fake/register.go index 30bfb630e20..3350b2ded93 100644 --- a/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/fake/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/scheme/register.go index 64fc7852b0c..066b540ced2 100644 --- a/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/examples/HyphenGroup/clientset/versioned/scheme/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/fake/register.go index 630b053f601..4711b6ae164 100644 --- a/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/fake/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/scheme/register.go index c47e10eeb26..472da566e93 100644 --- a/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/examples/MixedCase/clientset/versioned/scheme/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/internalversion/fake/register.go b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/internalversion/fake/register.go index 07f14723624..06f2832a8c8 100644 --- a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/internalversion/fake/register.go +++ b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/internalversion/fake/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/fake/register.go index 514a1262c3b..ee077eb0cd8 100644 --- a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/fake/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/scheme/register.go index af8a73d1dd9..851b918b313 100644 --- a/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/examples/apiserver/clientset/versioned/scheme/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go b/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go index c2e72aab834..58faccba6f2 100644 --- a/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go @@ -2367,7 +2367,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Type: []string{"object"}, }, }, @@ -2378,7 +2378,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", + Description: "TypeMeta is shared by all top level objects. 
The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "apiVersion": { diff --git a/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/fake/register.go b/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/fake/register.go index 79b2acd538e..82cda963bbf 100644 --- a/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/scheme/register.go b/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/scheme/register.go index 614eb332e50..fe8ec2221f2 100644 --- a/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/code-generator/examples/crd/clientset/versioned/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
diff --git a/staging/src/k8s.io/component-base/configz/configz.go b/staging/src/k8s.io/component-base/configz/configz.go index 869af648bee..60e4f938ade 100644 --- a/staging/src/k8s.io/component-base/configz/configz.go +++ b/staging/src/k8s.io/component-base/configz/configz.go @@ -18,25 +18,26 @@ limitations under the License. // // Each component that wants to serve its ComponentConfig creates a Config // object, and the program should call InstallHandler once. e.g., -// func main() { -// boatConfig := getBoatConfig() -// planeConfig := getPlaneConfig() // -// bcz, err := configz.New("boat") -// if err != nil { -// panic(err) -// } -// bcz.Set(boatConfig) +// func main() { +// boatConfig := getBoatConfig() +// planeConfig := getPlaneConfig() // -// pcz, err := configz.New("plane") -// if err != nil { -// panic(err) -// } -// pcz.Set(planeConfig) +// bcz, err := configz.New("boat") +// if err != nil { +// panic(err) +// } +// bcz.Set(boatConfig) // -// configz.InstallHandler(http.DefaultServeMux) -// http.ListenAndServe(":8080", http.DefaultServeMux) -// } +// pcz, err := configz.New("plane") +// if err != nil { +// panic(err) +// } +// pcz.Set(planeConfig) +// +// configz.InstallHandler(http.DefaultServeMux) +// http.ListenAndServe(":8080", http.DefaultServeMux) +// } package configz import ( diff --git a/staging/src/k8s.io/component-base/logs/logs.go b/staging/src/k8s.io/component-base/logs/logs.go index 84d48f87738..5e87a858098 100644 --- a/staging/src/k8s.io/component-base/logs/logs.go +++ b/staging/src/k8s.io/component-base/logs/logs.go @@ -176,12 +176,12 @@ func (writer KlogWriter) Write(data []byte) (n int, err error) { // InitLogs disables support for contextual logging in klog while // that Kubernetes feature is not considered stable yet. Commands // which want to support contextual logging can: -// - call klog.EnableContextualLogging after calling InitLogs, -// with a fixed `true` or depending on some command line flag or -// a feature gate check -// - set up a FeatureGate instance, the advanced logging configuration -// with Options and call Options.ValidateAndApply with the FeatureGate; -// k8s.io/component-base/logs/example/cmd demonstrates how to do that +// - call klog.EnableContextualLogging after calling InitLogs, +// with a fixed `true` or depending on some command line flag or +// a feature gate check +// - set up a FeatureGate instance, the advanced logging configuration +// with Options and call Options.ValidateAndApply with the FeatureGate; +// k8s.io/component-base/logs/example/cmd demonstrates how to do that func InitLogs() { log.SetOutput(KlogWriter{}) log.SetFlags(0) diff --git a/staging/src/k8s.io/component-base/metrics/desc.go b/staging/src/k8s.io/component-base/metrics/desc.go index 50eefd587fb..2ca9cfa7c21 100644 --- a/staging/src/k8s.io/component-base/metrics/desc.go +++ b/staging/src/k8s.io/component-base/metrics/desc.go @@ -218,8 +218,8 @@ func (d *Desc) initializeDeprecatedDesc() { // GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). // // It will be useful in testing scenario that the same Desc be registered to different registry. -// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) -// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. 
Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') func (d *Desc) GetRawDesc() *Desc { return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) } diff --git a/staging/src/k8s.io/component-base/metrics/metric.go b/staging/src/k8s.io/component-base/metrics/metric.go index e57e0b383d1..2980a972382 100644 --- a/staging/src/k8s.io/component-base/metrics/metric.go +++ b/staging/src/k8s.io/component-base/metrics/metric.go @@ -91,13 +91,14 @@ func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { // preprocessMetric figures out whether the lazy metric should be hidden or not. // This method takes a Version argument which should be the version of the binary in which // this code is currently being executed. A metric can be hidden under two conditions: -// 1. if the metric is deprecated and is outside the grace period (i.e. has been -// deprecated for more than one release -// 2. if the metric is manually disabled via a CLI flag. +// 1. if the metric is deprecated and is outside the grace period (i.e. has been +// deprecated for more than one release +// 2. if the metric is manually disabled via a CLI flag. // // Disclaimer: disabling a metric via a CLI flag has higher precedence than -// deprecation and will override show-hidden-metrics for the explicitly -// disabled metric. +// +// deprecation and will override show-hidden-metrics for the explicitly +// disabled metric. func (r *lazyMetric) preprocessMetric(version semver.Version) { disabledMetricsLock.RLock() defer disabledMetricsLock.RUnlock() diff --git a/staging/src/k8s.io/component-base/metrics/value.go b/staging/src/k8s.io/component-base/metrics/value.go index 4a19aaa3bf9..bf8a6b8f65c 100644 --- a/staging/src/k8s.io/component-base/metrics/value.go +++ b/staging/src/k8s.io/component-base/metrics/value.go @@ -50,7 +50,8 @@ func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelVal // NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. // // Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), -// otherwise, no stability guarantees would be offered. +// +// otherwise, no stability guarantees would be offered. func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { if m == nil { return nil diff --git a/staging/src/k8s.io/component-base/traces/utils.go b/staging/src/k8s.io/component-base/traces/utils.go index 5b271fd1afb..fafbb631e1a 100644 --- a/staging/src/k8s.io/component-base/traces/utils.go +++ b/staging/src/k8s.io/component-base/traces/utils.go @@ -56,10 +56,11 @@ func NewProvider(ctx context.Context, baseSampler sdktrace.Sampler, resourceOpts } // WrapperFor can be used to add tracing to a *rest.Config. Example usage: -// tp := traces.NewProvider(...) -// config, _ := rest.InClusterConfig() -// config.Wrap(traces.WrapperFor(&tp)) -// kubeclient, _ := clientset.NewForConfig(config) +// +// tp := traces.NewProvider(...) 
+// config, _ := rest.InClusterConfig() +// config.Wrap(traces.WrapperFor(&tp)) +// kubeclient, _ := clientset.NewForConfig(config) func WrapperFor(tp *trace.TracerProvider) transport.WrapperFunc { return func(rt http.RoundTripper) http.RoundTripper { opts := []otelhttp.Option{ diff --git a/staging/src/k8s.io/component-helpers/auth/rbac/reconciliation/reconcile_rolebindings.go b/staging/src/k8s.io/component-helpers/auth/rbac/reconciliation/reconcile_rolebindings.go index 65ca2bd1a35..c8e15961778 100644 --- a/staging/src/k8s.io/component-helpers/auth/rbac/reconciliation/reconcile_rolebindings.go +++ b/staging/src/k8s.io/component-helpers/auth/rbac/reconciliation/reconcile_rolebindings.go @@ -226,8 +226,10 @@ func contains(list []rbacv1.Subject, item rbacv1.Subject) bool { } // diffSubjectLists returns lists containing the items unique to each provided list: -// list1Only = list1 - list2 -// list2Only = list2 - list1 +// +// list1Only = list1 - list2 +// list2Only = list2 - list1 +// // if both returned lists are empty, the provided lists are equal func diffSubjectLists(list1 []rbacv1.Subject, list2 []rbacv1.Subject) (list1Only []rbacv1.Subject, list2Only []rbacv1.Subject) { for _, list1Item := range list1 { diff --git a/staging/src/k8s.io/component-helpers/node/topology/helpers.go b/staging/src/k8s.io/component-helpers/node/topology/helpers.go index 18c838cca56..8eaff674a6a 100644 --- a/staging/src/k8s.io/component-helpers/node/topology/helpers.go +++ b/staging/src/k8s.io/component-helpers/node/topology/helpers.go @@ -23,8 +23,9 @@ import ( // GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone; // it returns empty-string for no zone. // Since there are currently two separate zone keys: -// * "failure-domain.beta.kubernetes.io/zone" -// * "topology.kubernetes.io/zone" +// - "failure-domain.beta.kubernetes.io/zone" +// - "topology.kubernetes.io/zone" +// // GetZoneKey will first check failure-domain.beta.kubernetes.io/zone and if not exists, will then check // topology.kubernetes.io/zone func GetZoneKey(node *v1.Node) string { diff --git a/staging/src/k8s.io/controller-manager/pkg/leadermigration/config/default.go b/staging/src/k8s.io/controller-manager/pkg/leadermigration/config/default.go index 383058df34c..de8a2c9e0e5 100644 --- a/staging/src/k8s.io/controller-manager/pkg/leadermigration/config/default.go +++ b/staging/src/k8s.io/controller-manager/pkg/leadermigration/config/default.go @@ -19,7 +19,8 @@ package config import internal "k8s.io/controller-manager/config" // DefaultLeaderMigrationConfiguration returns the default LeaderMigrationConfiguration -// that is valid for this release of Kubernetes. +// +// that is valid for this release of Kubernetes. func DefaultLeaderMigrationConfiguration() *internal.LeaderMigrationConfiguration { return &internal.LeaderMigrationConfiguration{ LeaderName: "cloud-provider-extraction-migration", diff --git a/staging/src/k8s.io/controller-manager/pkg/leadermigration/migrator.go b/staging/src/k8s.io/controller-manager/pkg/leadermigration/migrator.go index 4e7ec3d10df..fdd74a46ab0 100644 --- a/staging/src/k8s.io/controller-manager/pkg/leadermigration/migrator.go +++ b/staging/src/k8s.io/controller-manager/pkg/leadermigration/migrator.go @@ -31,8 +31,9 @@ type LeaderMigrator struct { } // NewLeaderMigrator creates a LeaderMigrator with given config for the given component. 
component -// indicates which controller manager is requesting this leader migration, and it should be consistent -// with the component field of ControllerLeaderConfiguration. +// +// indicates which controller manager is requesting this leader migration, and it should be consistent +// with the component field of ControllerLeaderConfiguration. func NewLeaderMigrator(config *internal.LeaderMigrationConfiguration, component string) *LeaderMigrator { migratedControllers := make(map[string]bool) for _, leader := range config.ControllerLeaders { diff --git a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go index 935b8763dd1..5488726cce5 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go @@ -865,9 +865,9 @@ func (m *Int64Value) GetValue() int64 { // LinuxSandboxSecurityContext holds linux security configuration that will be // applied to a sandbox. Note that: -// 1) It does not apply to containers in the pods. -// 2) It may not be applicable to a PodSandbox which does not contain any running -// process. +// 1. It does not apply to containers in the pods. +// 2. It may not be applicable to a PodSandbox which does not contain any running +// process. type LinuxSandboxSecurityContext struct { // Configurations for the sandbox's namespaces. // This will be used only if the PodSandbox uses namespace for isolation. @@ -895,10 +895,11 @@ type LinuxSandboxSecurityContext struct { // AppArmor profile for the sandbox. Apparmor *SecurityProfile `protobuf:"bytes,10,opt,name=apparmor,proto3" json:"apparmor,omitempty"` // Seccomp profile for the sandbox, candidate values are: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed on the node. - // is the full path of the profile. + // - runtime/default: the default profile for the container runtime + // - unconfined: unconfined profile, ie, no seccomp sandboxing + // - localhost/: the profile installed on the node. + // is the full path of the profile. + // // Default: "", which is identical with unconfined. SeccompProfilePath string `protobuf:"bytes,7,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use. XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1249,8 +1250,9 @@ type PodSandboxConfig struct { // containers. For example, the files might be newline separated JSON // structured logs, systemd-journald journal files, gRPC trace files, etc. // E.g., - // PodSandboxConfig.LogDirectory = `/var/log/pods//` - // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` LogDirectory string `protobuf:"bytes,3,opt,name=log_directory,json=logDirectory,proto3" json:"log_directory,omitempty"` // DNS config for the sandbox. DnsConfig *DNSConfig `protobuf:"bytes,4,opt,name=dns_config,json=dnsConfig,proto3" json:"dns_config,omitempty"` @@ -3603,17 +3605,18 @@ type LinuxContainerSecurityContext struct { // AppArmor profile for the container. Apparmor *SecurityProfile `protobuf:"bytes,16,opt,name=apparmor,proto3" json:"apparmor,omitempty"` // AppArmor profile for the container, candidate values are: - // * runtime/default: equivalent to not specifying a profile. 
- // * unconfined: no profiles are loaded - // * localhost/: profile loaded on the node - // (localhost) by name. The possible profile names are detailed at - // https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference + // - runtime/default: equivalent to not specifying a profile. + // - unconfined: no profiles are loaded + // - localhost/: profile loaded on the node + // (localhost) by name. The possible profile names are detailed at + // https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference ApparmorProfile string `protobuf:"bytes,9,opt,name=apparmor_profile,json=apparmorProfile,proto3" json:"apparmor_profile,omitempty"` // Deprecated: Do not use. // Seccomp profile for the container, candidate values are: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed on the node. - // is the full path of the profile. + // - runtime/default: the default profile for the container runtime + // - unconfined: unconfined profile, ie, no seccomp sandboxing + // - localhost/: the profile installed on the node. + // is the full path of the profile. + // // Default: "", which is identical with unconfined. SeccompProfilePath string `protobuf:"bytes,10,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use. XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -4300,10 +4303,11 @@ type ContainerConfig struct { Devices []*Device `protobuf:"bytes,8,rep,name=devices,proto3" json:"devices,omitempty"` // Key-value pairs that may be used to scope and select individual resources. // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL + // + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Unstructured key-value map that may be used by the kubelet to store and // retrieve arbitrary metadata. @@ -4319,8 +4323,9 @@ type ContainerConfig struct { // Path relative to PodSandboxConfig.LogDirectory for container to store // the log (STDOUT and STDERR) on the host. // E.g., - // PodSandboxConfig.LogDirectory = `/var/log/pods//` - // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` // // WARNING: Log management and how kubelet should interface with the // container logs are under active discussion in @@ -6934,10 +6939,11 @@ var xxx_messageInfo_UpdateRuntimeConfigResponse proto.InternalMessageInfo // 1. Required conditions: Conditions are required for kubelet to work // properly. If any required condition is unmet, the node will be not ready. // The required conditions include: -// * RuntimeReady: RuntimeReady means the runtime is up and ready to accept -// basic containers e.g. container only needs host network. -// * NetworkReady: NetworkReady means the runtime network is up and ready to -// accept containers which require container network. +// - RuntimeReady: RuntimeReady means the runtime is up and ready to accept +// basic containers e.g. container only needs host network. 
+// - NetworkReady: NetworkReady means the runtime network is up and ready to +// accept containers which require container network. +// // 2. Optional conditions: Conditions are informative to the user, but kubelet // will not rely on. Since condition type is an arbitrary string, all conditions // not required are optional. These conditions will be exposed to users to help diff --git a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go index 3538c17b996..d080ba06af8 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go @@ -865,9 +865,9 @@ func (m *Int64Value) GetValue() int64 { // LinuxSandboxSecurityContext holds linux security configuration that will be // applied to a sandbox. Note that: -// 1) It does not apply to containers in the pods. -// 2) It may not be applicable to a PodSandbox which does not contain any running -// process. +// 1. It does not apply to containers in the pods. +// 2. It may not be applicable to a PodSandbox which does not contain any running +// process. type LinuxSandboxSecurityContext struct { // Configurations for the sandbox's namespaces. // This will be used only if the PodSandbox uses namespace for isolation. @@ -895,10 +895,11 @@ type LinuxSandboxSecurityContext struct { // AppArmor profile for the sandbox. Apparmor *SecurityProfile `protobuf:"bytes,10,opt,name=apparmor,proto3" json:"apparmor,omitempty"` // Seccomp profile for the sandbox, candidate values are: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed on the node. - // is the full path of the profile. + // - runtime/default: the default profile for the container runtime + // - unconfined: unconfined profile, ie, no seccomp sandboxing + // - localhost/: the profile installed on the node. + // is the full path of the profile. + // // Default: "", which is identical with unconfined. SeccompProfilePath string `protobuf:"bytes,7,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use. XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1249,8 +1250,9 @@ type PodSandboxConfig struct { // containers. For example, the files might be newline separated JSON // structured logs, systemd-journald journal files, gRPC trace files, etc. // E.g., - // PodSandboxConfig.LogDirectory = `/var/log/pods//` - // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` LogDirectory string `protobuf:"bytes,3,opt,name=log_directory,json=logDirectory,proto3" json:"log_directory,omitempty"` // DNS config for the sandbox. DnsConfig *DNSConfig `protobuf:"bytes,4,opt,name=dns_config,json=dnsConfig,proto3" json:"dns_config,omitempty"` @@ -3603,17 +3605,18 @@ type LinuxContainerSecurityContext struct { // AppArmor profile for the container. Apparmor *SecurityProfile `protobuf:"bytes,16,opt,name=apparmor,proto3" json:"apparmor,omitempty"` // AppArmor profile for the container, candidate values are: - // * runtime/default: equivalent to not specifying a profile. - // * unconfined: no profiles are loaded - // * localhost/: profile loaded on the node - // (localhost) by name. 
The possible profile names are detailed at - // https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference + // - runtime/default: equivalent to not specifying a profile. + // - unconfined: no profiles are loaded + // - localhost/: profile loaded on the node + // (localhost) by name. The possible profile names are detailed at + // https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference ApparmorProfile string `protobuf:"bytes,9,opt,name=apparmor_profile,json=apparmorProfile,proto3" json:"apparmor_profile,omitempty"` // Deprecated: Do not use. // Seccomp profile for the container, candidate values are: - // * runtime/default: the default profile for the container runtime - // * unconfined: unconfined profile, ie, no seccomp sandboxing - // * localhost/: the profile installed on the node. - // is the full path of the profile. + // - runtime/default: the default profile for the container runtime + // - unconfined: unconfined profile, ie, no seccomp sandboxing + // - localhost/: the profile installed on the node. + // is the full path of the profile. + // // Default: "", which is identical with unconfined. SeccompProfilePath string `protobuf:"bytes,10,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use. XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -4300,10 +4303,11 @@ type ContainerConfig struct { Devices []*Device `protobuf:"bytes,8,rep,name=devices,proto3" json:"devices,omitempty"` // Key-value pairs that may be used to scope and select individual resources. // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL + // + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Unstructured key-value map that may be used by the kubelet to store and // retrieve arbitrary metadata. @@ -4319,8 +4323,9 @@ type ContainerConfig struct { // Path relative to PodSandboxConfig.LogDirectory for container to store // the log (STDOUT and STDERR) on the host. // E.g., - // PodSandboxConfig.LogDirectory = `/var/log/pods//` - // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` // // WARNING: Log management and how kubelet should interface with the // container logs are under active discussion in @@ -6934,10 +6939,11 @@ var xxx_messageInfo_UpdateRuntimeConfigResponse proto.InternalMessageInfo // 1. Required conditions: Conditions are required for kubelet to work // properly. If any required condition is unmet, the node will be not ready. // The required conditions include: -// * RuntimeReady: RuntimeReady means the runtime is up and ready to accept -// basic containers e.g. container only needs host network. -// * NetworkReady: NetworkReady means the runtime network is up and ready to -// accept containers which require container network. +// - RuntimeReady: RuntimeReady means the runtime is up and ready to accept +// basic containers e.g. container only needs host network. +// - NetworkReady: NetworkReady means the runtime network is up and ready to +// accept containers which require container network. +// // 2. 
Optional conditions: Conditions are informative to the user, but kubelet // will not rely on. Since condition type is an arbitrary string, all conditions // not required are optional. These conditions will be exposed to users to help diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go index 3b3dbac1f63..cf1ba112436 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/aws_ebs.go @@ -232,11 +232,13 @@ var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$") // KubernetesVolumeIDToEBSVolumeID translates Kubernetes volume ID to EBS volume ID // KubernetesVolumeID forms: -// * aws:/// -// * aws:/// -// * +// - aws:/// +// - aws:/// +// - +// // EBS Volume ID form: -// * vol- +// - vol- +// // This translation shouldn't be needed and should be fixed in long run // See https://github.com/kubernetes/kubernetes/issues/73730 func KubernetesVolumeIDToEBSVolumeID(kubernetesID string) (string, error) { diff --git a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go index e6be04eeef2..2ce4c271de5 100644 --- a/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go +++ b/staging/src/k8s.io/csi-translation-lib/plugins/in_tree_volume.go @@ -215,12 +215,12 @@ func translateTopologyFromInTreeToCSI(pv *v1.PersistentVolume, csiTopologyKey st // getTopologyLabel checks if the kubernetes topology label used in this // PV is GA and return the zone/region label used. // The version checking follows the following orders -// 1. Check NodeAffinity -// 1.1 Check if zoneGA exists, if yes return GA labels -// 1.2 Check if zoneBeta exists, if yes return Beta labels -// 2. Check PV labels -// 2.1 Check if zoneGA exists, if yes return GA labels -// 2.2 Check if zoneBeta exists, if yes return Beta labels +// 1. Check NodeAffinity +// 1.1 Check if zoneGA exists, if yes return GA labels +// 1.2 Check if zoneBeta exists, if yes return Beta labels +// 2. Check PV labels +// 2.1 Check if zoneGA exists, if yes return GA labels +// 2.2 Check if zoneBeta exists, if yes return Beta labels func getTopologyLabel(pv *v1.PersistentVolume) (zoneLabel string, regionLabel string) { if zoneGA := TopologyKeyExist(v1.LabelTopologyZone, pv.Spec.NodeAffinity); zoneGA { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go index 0bdc5ab5076..18428eac8ac 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go index 904397937f8..cb21939117f 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/fake/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/fake/register.go index accec4e7281..3d8761b6d60 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/fake/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme/register.go b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme/register.go index e7215ac2203..a4ef5421d07 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme/register.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index fe40f595a37..faeefe228ce 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -204,7 +204,8 @@ func (c *autoRegisterController) processNextWorkItem() bool { // checkAPIService syncs the current APIService against a list of desired APIService objects // -// | A. desired: not found | B. desired: sync on start | C. desired: sync always +// | A. desired: not found | B. desired: sync on start | C. desired: sync always +// // ------------------------------------------------|-----------------------|---------------------------|------------------------ // 1. current: lookup error | error | error | error // 2. 
current: not found | - | create once | create diff --git a/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go index 3272fa71e5c..a1490307a23 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go @@ -2370,7 +2370,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Type: []string{"object"}, }, }, @@ -2381,7 +2381,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", + Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "apiVersion": { diff --git a/staging/src/k8s.io/kube-scheduler/extender/v1/types.go b/staging/src/k8s.io/kube-scheduler/extender/v1/types.go index e1f07c3dd7c..ebb4fd7e5dc 100644 --- a/staging/src/k8s.io/kube-scheduler/extender/v1/types.go +++ b/staging/src/k8s.io/kube-scheduler/extender/v1/types.go @@ -45,8 +45,9 @@ type ExtenderPreemptionArgs struct { } // Victims represents: -// pods: a group of pods expected to be preempted. -// numPDBViolations: the count of violations of PodDisruptionBudget +// +// pods: a group of pods expected to be preempted. +// numPDBViolations: the count of violations of PodDisruptionBudget type Victims struct { Pods []*v1.Pod NumPDBViolations int64 @@ -58,9 +59,10 @@ type MetaPod struct { } // MetaVictims represents: -// pods: a group of pods expected to be preempted. -// Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way. -// numPDBViolations: the count of violations of PodDisruptionBudget +// +// pods: a group of pods expected to be preempted. +// Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way. +// numPDBViolations: the count of violations of PodDisruptionBudget type MetaVictims struct { Pods []*MetaPod NumPDBViolations int64 diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go b/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go index 5cdfcb66327..d15b710c48b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go @@ -444,16 +444,18 @@ func NewKubectlCommand(o KubectlOptions) *cobra.Command { } // addCmdHeaderHooks performs updates on two hooks: -// 1) Modifies the passed "cmds" persistent pre-run function to parse command headers. -// These headers will be subsequently added as X-headers to every -// REST call. -// 2) Adds CommandHeaderRoundTripper as a wrapper around the standard -// RoundTripper. CommandHeaderRoundTripper adds X-Headers then delegates -// to standard RoundTripper. +// 1. Modifies the passed "cmds" persistent pre-run function to parse command headers. 
+// These headers will be subsequently added as X-headers to every +// REST call. +// 2. Adds CommandHeaderRoundTripper as a wrapper around the standard +// RoundTripper. CommandHeaderRoundTripper adds X-Headers then delegates +// to standard RoundTripper. +// // For beta, these hooks are updated unless the KUBECTL_COMMAND_HEADERS environment variable // is set, and the value of the env var is false (or zero). // See SIG CLI KEP 859 for more information: -// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +// +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers func addCmdHeaderHooks(cmds *cobra.Command, kubeConfigFlags *genericclioptions.ConfigFlags) { // If the feature gate env var is set to "false", then do no add kubectl command headers. if value, exists := os.LookupEnv(kubectlCmdHeaders); exists { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go index ea280407421..1e5bd8bf78b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go @@ -403,8 +403,9 @@ func (o *DebugOptions) visitNode(ctx context.Context, node *corev1.Node) (*corev } // visitPod handles debugging for pod targets by (depending on options): -// 1. Creating an ephemeral debug container in an existing pod, OR -// 2. Making a copy of pod with certain attributes changed +// 1. Creating an ephemeral debug container in an existing pod, OR +// 2. Making a copy of pod with certain attributes changed +// // visitPod returns a pod and debug container name for subsequent attach, if applicable. func (o *DebugOptions) visitPod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, string, error) { if len(o.CopyTo) > 0 { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go index f9f481769b1..2b205667c6b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/customcolumn.go @@ -38,12 +38,14 @@ import ( var jsonRegexp = regexp.MustCompile(`^\{\.?([^{}]+)\}$|^\.?([^{}]+)$`) // RelaxedJSONPathExpression attempts to be flexible with JSONPath expressions, it accepts: -// * metadata.name (no leading '.' or curly braces '{...}' -// * {metadata.name} (no leading '.') -// * .metadata.name (no curly braces '{...}') -// * {.metadata.name} (complete expression) +// - metadata.name (no leading '.' or curly braces '{...}' +// - {metadata.name} (no leading '.') +// - .metadata.name (no curly braces '{...}') +// - {.metadata.name} (complete expression) +// // And transforms them all into a valid jsonpath expression: -// {.metadata.name} +// +// {.metadata.name} func RelaxedJSONPathExpression(pathExpression string) (string, error) { if len(pathExpression) == 0 { return pathExpression, nil @@ -67,8 +69,8 @@ func RelaxedJSONPathExpression(pathExpression string) (string, error) { // NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of
: pairs. // e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: // -// NAME API_VERSION -// foo bar +// NAME API_VERSION +// foo bar func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder, noHeaders bool) (*CustomColumnsPrinter, error) { if len(spec) == 0 { return nil, fmt.Errorf("custom-columns format specified but no custom columns given") diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go index e45b686b391..3ac1dd3a28d 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go @@ -131,15 +131,15 @@ func extractFieldPathAsString(obj interface{}, fieldPath string) (string, error) // splitMaybeSubscriptedPath checks whether the specified fieldPath is // subscripted, and -// - if yes, this function splits the fieldPath into path and subscript, and -// returns (path, subscript, true). -// - if no, this function returns (fieldPath, "", false). +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). // // Example inputs and outputs: -// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) -// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) -// - "metadata.labels['']" --> ("metadata.labels", "", true) -// - "metadata.labels" --> ("metadata.labels", "", false) +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels[”]" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) func splitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { if !strings.HasSuffix(fieldPath, "']") { return fieldPath, "", false diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go index f6f6e79ae7d..6dea1eb4c3a 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go @@ -293,7 +293,7 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { return utilerrors.NewAggregate(allErrs) } -//Note: the obj mutates in the function +// Note: the obj mutates in the function func updateSubjectForObject(obj runtime.Object, subjects []rbacv1.Subject, fn updateSubjects) (bool, error) { switch t := obj.(type) { case *rbacv1.RoleBinding: diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index f3c6062b924..a1efdd0af7b 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -5037,7 +5037,7 @@ func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (st // Add adds one or more describer functions to the Describer. The passed function must // match the signature: // -// func(...) (string, error) +// func(...) (string, error) // // Any number of arguments may be provided. 
func (d *Describers) Add(fns ...interface{}) error { diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go index ed51da2fcb7..00426600f37 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go @@ -167,9 +167,9 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m // equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] // We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() diff --git a/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go b/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go index 38e90c37cc6..f0352d9ef20 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go +++ b/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go @@ -168,9 +168,9 @@ func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc, chunkS // EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] // We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() diff --git a/staging/src/k8s.io/kubectl/pkg/util/term/term_writer.go b/staging/src/k8s.io/kubectl/pkg/util/term/term_writer.go index ea254bb6861..e3f60088026 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/term/term_writer.go +++ b/staging/src/k8s.io/kubectl/pkg/util/term/term_writer.go @@ -35,9 +35,11 @@ type wordWrapWriter struct { // NewResponsiveWriter creates a Writer that detects the column width of the // terminal we are in, and adjusts every line width to fit and use recommended // terminal sizes for better readability. Does proper word wrapping automatically. -// if terminal width >= 120 columns use 120 columns -// if terminal width >= 100 columns use 100 columns -// if terminal width >= 80 columns use 80 columns +// +// if terminal width >= 120 columns use 120 columns +// if terminal width >= 100 columns use 100 columns +// if terminal width >= 80 columns use 80 columns +// // In case we're not in a terminal or if it's smaller than 80 columns width, // doesn't do any wrapping. 
func NewResponsiveWriter(w io.Writer) io.Writer { diff --git a/staging/src/k8s.io/kubectl/pkg/util/util.go b/staging/src/k8s.io/kubectl/pkg/util/util.go index af704b09183..ea57d3b39bc 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/util.go +++ b/staging/src/k8s.io/kubectl/pkg/util/util.go @@ -52,10 +52,10 @@ func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { // ParseFileSource parses the source given. // -// Acceptable formats include: -// 1. source-path: the basename will become the key name -// 2. source-name=source-path: the source-name will become the key name and -// source-path is the path to the key file. +// Acceptable formats include: +// 1. source-path: the basename will become the key name +// 2. source-name=source-path: the source-name will become the key name and +// source-path is the path to the key file. // // Key names cannot include '='. func ParseFileSource(source string) (keyName, filePath string, err error) { diff --git a/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.pb.go index 6a79013e123..b720b6ec8e8 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.pb.go @@ -197,10 +197,11 @@ func (m *ListAndWatchResponse) GetDevices() []*Device { } // E.g: -// struct Device { -// ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", -// Health: "Healthy", -//} +// +// struct Device { +// ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", +// Health: "Healthy", +// } type Device struct { // A unique ID assigned by the device plugin used // to identify devices during the communication @@ -258,12 +259,12 @@ func (m *Device) GetHealth() string { return "" } -// - Allocate is expected to be called during pod creation since allocation -// failures for any container would result in pod startup failure. -// - Allocate allows kubelet to exposes additional artifacts in a pod's -// environment as directed by the plugin. -// - Allocate allows Device Plugin to run device specific operations on -// the Devices requested +// - Allocate is expected to be called during pod creation since allocation +// failures for any container would result in pod startup failure. +// - Allocate allows kubelet to exposes additional artifacts in a pod's +// environment as directed by the plugin. 
+// - Allocate allows Device Plugin to run device specific operations on +// the Devices requested type AllocateRequest struct { DevicesIDs []string `protobuf:"bytes,1,rep,name=devicesIDs,proto3" json:"devicesIDs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` diff --git a/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go index ced0bf57c50..4f183239ca4 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go @@ -351,13 +351,14 @@ func (m *NUMANode) GetID() int64 { } // E.g: -// struct Device { -// ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", -// Health: "Healthy", -// Topology: -// Node: -// ID: 1 -//} +// +// struct Device { +// ID: "GPU-fef8089b-4820-abfc-e83e-94318197576e", +// Health: "Healthy", +// Topology: +// Node: +// ID: 1 +// } type Device struct { // A unique ID assigned by the device plugin used // to identify devices during the communication @@ -424,10 +425,10 @@ func (m *Device) GetTopology() *TopologyInfo { return nil } -// - PreStartContainer is expected to be called before each container start if indicated by plugin during registration phase. -// - PreStartContainer allows kubelet to pass reinitialized devices to containers. -// - PreStartContainer allows Device Plugin to run device specific operations on -// the Devices requested +// - PreStartContainer is expected to be called before each container start if indicated by plugin during registration phase. +// - PreStartContainer allows kubelet to pass reinitialized devices to containers. +// - PreStartContainer allows Device Plugin to run device specific operations on +// the Devices requested type PreStartContainerRequest struct { DevicesIDs []string `protobuf:"bytes,1,rep,name=devices_ids,json=devicesIds,proto3" json:"devices_ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -717,12 +718,12 @@ func (m *ContainerPreferredAllocationResponse) GetDeviceIDs() []string { return nil } -// - Allocate is expected to be called during pod creation since allocation -// failures for any container would result in pod startup failure. -// - Allocate allows kubelet to exposes additional artifacts in a pod's -// environment as directed by the plugin. -// - Allocate allows Device Plugin to run device specific operations on -// the Devices requested +// - Allocate is expected to be called during pod creation since allocation +// failures for any container would result in pod startup failure. +// - Allocate allows kubelet to exposes additional artifacts in a pod's +// environment as directed by the plugin. +// - Allocate allows Device Plugin to run device specific operations on +// the Devices requested type AllocateRequest struct { ContainerRequests []*ContainerAllocateRequest `protobuf:"bytes,1,rep,name=container_requests,json=containerRequests,proto3" json:"container_requests,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` diff --git a/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go index d478726ab25..a1aa7469e4a 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go @@ -18,16 +18,18 @@ limitations under the License. // source: api.proto /* - Package pluginregistration is a generated protocol buffer package. 
+Package pluginregistration is a generated protocol buffer package. - It is generated from these files: - api.proto +It is generated from these files: - It has these top-level messages: - PluginInfo - RegistrationStatus - RegistrationStatusResponse - InfoRequest + api.proto + +It has these top-level messages: + + PluginInfo + RegistrationStatus + RegistrationStatusResponse + InfoRequest */ package pluginregistration diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go index d7bed4bd924..93ed45472b9 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go @@ -175,7 +175,9 @@ const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernet // ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service // to specify the protocol spoken by the backend (pod) behind a listener. // If `http` (default) or `https`, an HTTPS listener that terminates the -// connection and parses headers is created. +// +// connection and parses headers is created. +// // If set to `ssl` or `tcp`, a "raw" SSL listener is used. // If set to `http` and `aws-load-balancer-ssl-cert` is not used then // a HTTP listener is used. @@ -238,9 +240,9 @@ const ServiceAnnotationLoadBalancerTargetNodeLabels = "service.beta.kubernetes.i // subnetID or subnetName from different AZs // By default, the controller will auto-discover the subnets. If there are multiple subnets per AZ, auto-discovery // will break the tie in the following order - -// 1. prefer the subnet with the correct role tag. kubernetes.io/role/elb for public and kubernetes.io/role/internal-elb for private access -// 2. prefer the subnet with the cluster tag kubernetes.io/cluster/ -// 3. prefer the subnet that is first in lexicographic order +// 1. prefer the subnet with the correct role tag. kubernetes.io/role/elb for public and kubernetes.io/role/internal-elb for private access +// 2. prefer the subnet with the cluster tag kubernetes.io/cluster/ +// 3. prefer the subnet that is first in lexicographic order const ServiceAnnotationLoadBalancerSubnets = "service.beta.kubernetes.io/aws-load-balancer-subnets" // Event key when a volume is stuck on attaching state when being attached to a volume @@ -3751,8 +3753,8 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load // sortELBSecurityGroupList returns a list of sorted securityGroupIDs based on the original order // from buildELBSecurityGroupList. 
The logic is: -// * securityGroups specified by ServiceAnnotationLoadBalancerSecurityGroups appears first in order -// * securityGroups specified by ServiceAnnotationLoadBalancerExtraSecurityGroups appears last in order +// - securityGroups specified by ServiceAnnotationLoadBalancerSecurityGroups appears first in order +// - securityGroups specified by ServiceAnnotationLoadBalancerExtraSecurityGroups appears last in order func (c *Cloud) sortELBSecurityGroupList(securityGroupIDs []string, annotations map[string]string) { annotatedSGList := getSGListFromAnnotation(annotations[ServiceAnnotationLoadBalancerSecurityGroups]) annotatedExtraSGList := getSGListFromAnnotation(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups]) diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go index f295bca546f..6cca19b105d 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go @@ -1240,7 +1240,8 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // syncElbListeners computes a plan to reconcile the desired vs actual state of the listeners on an ELB // NOTE: there exists an O(nlgn) implementation for this function. However, as the default limit of -// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n). +// +// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n). func syncElbListeners(loadBalancerName string, listeners []*elb.Listener, listenerDescriptions []*elb.ListenerDescription) ([]*elb.Listener, []*int64) { foundSet := make(map[int]bool) removals := []*int64{} diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/instances.go b/staging/src/k8s.io/legacy-cloud-providers/aws/instances.go index 7768deb3bb2..c2970382759 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/instances.go @@ -50,9 +50,9 @@ func (i InstanceID) awsString() *string { // KubernetesInstanceID represents the id for an instance in the kubernetes API; // the following form -// * aws://// -// * aws://// -// * +// - aws://// +// - aws://// +// - type KubernetesInstanceID string // MapToAWSInstanceID extracts the InstanceID from the KubernetesInstanceID diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/volumes.go b/staging/src/k8s.io/legacy-cloud-providers/aws/volumes.go index b106bd63f18..48fde1187fa 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/volumes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/volumes.go @@ -43,9 +43,9 @@ func (i EBSVolumeID) awsString() *string { // KubernetesVolumeID represents the id for a volume in the kubernetes API; // a few forms are recognized: -// * aws:/// -// * aws:/// -// * +// - aws:/// +// - aws:/// +// - type KubernetesVolumeID string // DiskInfo returns aws disk information in easy to use manner diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go index dd967b932f9..905f71c6c7b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController.go @@ -57,7 +57,7 @@ type storageAccountState struct { defaultContainerCreated bool } -//BlobDiskController : blob disk controller struct +// BlobDiskController : blob 
disk controller struct type BlobDiskController struct { common *controllerCommon accounts map[string]*storageAccountState @@ -227,7 +227,7 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str return blob.Delete(nil) } -//CreateBlobDisk : create a blob disk in a node +// CreateBlobDisk : create a blob disk in a node func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) { klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) @@ -253,7 +253,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT return diskURI, nil } -//DeleteBlobDisk : delete a blob disk from a node +// DeleteBlobDisk : delete a blob disk from a node func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { storageAccountName, vhdName, err := diskNameAndSANameFromURI(diskURI) if err != nil { @@ -602,7 +602,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam return SAName, nil } -//Gets storage account exist, provisionStatus, Error if any +// Gets storage account exist, provisionStatus, Error if any func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) { ctx, cancel := getContextWithCancel() defer cancel() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go index f02fc4e2e0f..4af0f0dd4b4 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -369,7 +369,8 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string // InstanceType returns the type of the specified instance. // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) // (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet: -// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value] +// +// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value] func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) { // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. 
unmanaged, err := az.IsNodeUnmanaged(string(name)) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index b63194f8e9e..c9b82487ebb 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -2142,7 +2142,7 @@ func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lb (ipTagRequest.IPTagsRequestedByAnnotation && !areIPTagsEquivalent(currentIPTags, ipTagRequest.IPTags)) } -// ensurePIPTagged ensures the public IP of the service is tagged as configured +// ensurePIPTagged ensures the public IP of the service is tagged as configured func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddress) bool { configTags := parseTags(az.Tags) annotationTags := make(map[string]*string) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index e7eb3d59e65..9eb6ba2acc8 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -46,7 +46,7 @@ const ( diskEncryptionSetIDFormat = "/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}" ) -//ManagedDiskController : managed disk controller struct +// ManagedDiskController : managed disk controller struct type ManagedDiskController struct { common *controllerCommon } @@ -81,7 +81,7 @@ type ManagedDiskOptions struct { MaxShares int32 } -//CreateManagedDisk : create managed disk +// CreateManagedDisk : create managed disk func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) { var err error klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) @@ -212,7 +212,7 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( return diskID, nil } -//DeleteManagedDisk : delete managed disk +// DeleteManagedDisk : delete managed disk func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go index d15d2659bba..900160ff4cc 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go @@ -74,8 +74,9 @@ func (op *delayedRouteOperation) wait() error { // delayedRouteUpdater defines a delayed route updater, which batches all the // route updating operations within "interval" period. // Example usage: -// op, err := updater.addRouteOperation(routeOperationAdd, route) -// err = op.wait() +// +// op, err := updater.addRouteOperation(routeOperationAdd, route) +// err = op.wait() type delayedRouteUpdater struct { az *Cloud interval time.Duration @@ -553,7 +554,7 @@ func findFirstIPByFamily(ips []string, v6 bool) (string, error) { return "", fmt.Errorf("no match found matching the ipfamily requested") } -//strips : . / +// strips : . 
/ func cidrtoRfc1035(cidr string) string { cidr = strings.ReplaceAll(cidr, ":", "") cidr = strings.ReplaceAll(cidr, ".", "") @@ -561,7 +562,7 @@ func cidrtoRfc1035(cidr string) string { return cidr } -// ensureRouteTableTagged ensures the route table is tagged as configured +// ensureRouteTableTagged ensures the route table is tagged as configured func (az *Cloud) ensureRouteTableTagged(rt *network.RouteTable) (map[string]*string, bool) { if az.Tags == "" { return nil, false diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go index 28ddfe0409b..b12247f4a53 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -271,8 +271,10 @@ func isInternalLoadBalancer(lb *network.LoadBalancer) bool { // SingleStack -v4 (pre v1.16) => BackendPool name == clusterName // SingleStack -v6 => BackendPool name == -IPv6 (all cluster bootstrap uses this name) // DualStack -// => IPv4 BackendPool name == clusterName -// => IPv6 BackendPool name == -IPv6 +// +// => IPv4 BackendPool name == clusterName +// => IPv6 BackendPool name == -IPv6 +// // This means: // clusters moving from IPv4 to dualstack will require no changes // clusters moving from IPv6 to dualstack will require no changes as the IPv4 backend pool will created with @@ -425,7 +427,7 @@ outer: var polyTable = crc32.MakeTable(crc32.Koopman) -//MakeCRC32 : convert string to CRC32 format +// MakeCRC32 : convert string to CRC32 format func MakeCRC32(str string) string { crc := crc32.New(polyTable) crc.Write([]byte(str)) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index c6b18407467..cdc43a6ee9b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -359,10 +359,10 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { // GetNodeNameByProviderID gets the node name by provider ID. // providerID example: -// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0 -// 2. vmss providerID: -// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1 -// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1 +// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0 +// 2. vmss providerID: +// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1 +// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1 func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { // NodeName is not part of providerID for vmss instances. 
scaleSetName, err := extractScaleSetNameByProviderID(providerID) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_wrap.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_wrap.go index 915051529f9..9ebdec2d065 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_wrap.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_wrap.go @@ -62,9 +62,9 @@ func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) { return false, err } -/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache -/// The service side has throttling control that delays responses if there are multiple requests onto certain vm -/// resource request in short period. +// / getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache +// / The service side has throttling control that delays responses if there are multiple requests onto certain vm +// / resource request in short period. func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) { vmName := string(nodeName) cachedVM, err := az.vmCache.Get(vmName, crt) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go index 09feed922a5..035ff25bbeb 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -373,7 +373,7 @@ func removeFinalizer(service *v1.Service, kubeClient v1core.CoreV1Interface, key return err } -//hasFinalizer returns if the given service has the specified key in its list of finalizers. +// hasFinalizer returns if the given service has the specified key in its list of finalizers. func hasFinalizer(service *v1.Service, key string) bool { for _, finalizer := range service.ObjectMeta.Finalizers { if finalizer == key { diff --git a/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go index 8a29760d92d..c735fbc0929 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack_loadbalancer.go @@ -522,7 +522,7 @@ func nodeAddressForLB(node *v1.Node) (string, error) { return "", ErrNoAddressFound } -//getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting +// getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting func getStringFromServiceAnnotation(service *v1.Service, annotationKey string, defaultSetting string) string { klog.V(4).Infof("getStringFromServiceAnnotation(%v, %v, %v)", service, annotationKey, defaultSetting) if annotationValue, ok := service.Annotations[annotationKey]; ok { diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go index 65586e0ea50..d7b826b7d7c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go @@ -437,9 +437,9 @@ func (nodeInfo *NodeInfo) VM() *vclib.VirtualMachine { // vcConnect connects to vCenter with existing credentials // If credentials are invalid: -// 1. It will fetch credentials from credentialManager -// 2. 
Update the credentials -// 3. Connects again to vCenter with fetched credentials +// 1. It will fetch credentials from credentialManager +// 2. Update the credentials +// 3. Connects again to vCenter with fetched credentials func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereInstance) error { err := vsphereInstance.conn.Connect(ctx) if err == nil { diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go index 3662248d596..0dd3824d4ab 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/utils.go @@ -146,7 +146,7 @@ func GetPathFromVMDiskPath(vmDiskPath string) string { return datastorePathObj.Path } -//GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. +// GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) @@ -157,7 +157,7 @@ func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath return datastorePathObj, nil } -//IsValidUUID checks if the string is a valid UUID. +// IsValidUUID checks if the string is a valid UUID. func IsValidUUID(uuid string) bool { r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") return r.MatchString(uuid) diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go index c00d19de153..d2d34302ee0 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go @@ -58,7 +58,8 @@ import ( ) // localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = `-----BEGIN CERTIFICATE----- MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/fake/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/fake/register.go index 79b64c15fa6..dfc7b93d59b 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/fake/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/scheme/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/scheme/register.go index 361fe99e15a..a92b020a9ff 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/scheme/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/deprecated/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/register.go index 29310bff13f..6d82cc0c76f 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
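// The register.go hunks above only re-indent the AddToScheme doc comment; the
// composition pattern it describes is unchanged. A minimal sketch of that
// pattern, assuming the metrics clientset scheme from this file is the second
// group being registered (error handling collapsed into utilruntime.Must for
// brevity; this is an illustration, not part of the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	metricsscheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme"
)

func main() {
	// Start from a fresh scheme rather than mutating a shared global.
	s := runtime.NewScheme()

	// Register the core Kubernetes types first ...
	utilruntime.Must(clientgoscheme.AddToScheme(s))

	// ... then layer the metrics group on top, so RawExtensions holding
	// either group's objects can be decoded against the same scheme.
	utilruntime.Must(metricsscheme.AddToScheme(s))

	fmt.Println("registered kinds:", len(s.AllKnownTypes()))
}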
diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go index 361fe99e15a..a92b020a9ff 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/metrics/pkg/client/custom_metrics/scheme/register.go b/staging/src/k8s.io/metrics/pkg/client/custom_metrics/scheme/register.go index 703a51a7042..32ad059336c 100644 --- a/staging/src/k8s.io/metrics/pkg/client/custom_metrics/scheme/register.go +++ b/staging/src/k8s.io/metrics/pkg/client/custom_metrics/scheme/register.go @@ -49,14 +49,14 @@ func init() { // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/mount-utils/mount.go b/staging/src/k8s.io/mount-utils/mount.go index eca644046f3..a93e6d2fab8 100644 --- a/staging/src/k8s.io/mount-utils/mount.go +++ b/staging/src/k8s.io/mount-utils/mount.go @@ -273,7 +273,8 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { // MakeBindOpts detects whether a bind mount is being requested and makes the remount options to // use in case of bind mount, due to the fact that bind mount doesn't respect mount options. 
// The list equals: -// options - 'bind' + 'remount' (no duplicate) +// +// options - 'bind' + 'remount' (no duplicate) func MakeBindOpts(options []string) (bool, []string, []string) { bind, bindOpts, bindRemountOpts, _ := MakeBindOptsSensitive(options, nil /* sensitiveOptions */) return bind, bindOpts, bindRemountOpts diff --git a/staging/src/k8s.io/pod-security-admission/policy/check_seccompProfile_baseline.go b/staging/src/k8s.io/pod-security-admission/policy/check_seccompProfile_baseline.go index 55152b3e6a7..d45dba70763 100644 --- a/staging/src/k8s.io/pod-security-admission/policy/check_seccompProfile_baseline.go +++ b/staging/src/k8s.io/pod-security-admission/policy/check_seccompProfile_baseline.go @@ -27,7 +27,6 @@ import ( ) /* - If seccomp profiles are specified, only runtime default and localhost profiles are allowed. v1.0 - v1.18: @@ -44,7 +43,6 @@ spec.containers[*].securityContext.seccompProfile.type spec.initContainers[*].securityContext.seccompProfile.type **Allowed Values:** 'RuntimeDefault', 'Localhost', undefined - */ const ( annotationKeyPod = "seccomp.security.alpha.kubernetes.io/pod" diff --git a/staging/src/k8s.io/pod-security-admission/policy/checks.go b/staging/src/k8s.io/pod-security-admission/policy/checks.go index 105c4ee0bf0..6b13d2f1cf3 100644 --- a/staging/src/k8s.io/pod-security-admission/policy/checks.go +++ b/staging/src/k8s.io/pod-security-admission/policy/checks.go @@ -58,10 +58,11 @@ type CheckID string // and if not, why it was forbidden. // // Example output for (false, "host ports", "8080, 9090"): -// When checking all pods in a namespace: -// disallowed by policy "baseline": host ports, privileged containers, non-default capabilities -// When checking an individual pod: -// disallowed by policy "baseline": host ports (8080, 9090), privileged containers, non-default capabilities (CAP_NET_RAW) +// +// When checking all pods in a namespace: +// disallowed by policy "baseline": host ports, privileged containers, non-default capabilities +// When checking an individual pod: +// disallowed by policy "baseline": host ports (8080, 9090), privileged containers, non-default capabilities (CAP_NET_RAW) type CheckResult struct { // Allowed indicates if the check allowed the pod. Allowed bool diff --git a/staging/src/k8s.io/pod-security-admission/test/fixtures_test.go b/staging/src/k8s.io/pod-security-admission/test/fixtures_test.go index 959ab0d18f1..21654926b65 100644 --- a/staging/src/k8s.io/pod-security-admission/test/fixtures_test.go +++ b/staging/src/k8s.io/pod-security-admission/test/fixtures_test.go @@ -40,7 +40,7 @@ const updateEnvVar = "UPDATE_POD_SECURITY_FIXTURE_DATA" // and that in-memory fixtures match serialized fixtures in testdata. 
// When adding new versions or checks, serialized fixtures can be updated by running: // -// UPDATE_POD_SECURITY_FIXTURE_DATA=true go test k8s.io/pod-security-admission/test +// UPDATE_POD_SECURITY_FIXTURE_DATA=true go test k8s.io/pod-security-admission/test func TestFixtures(t *testing.T) { expectedFiles := sets.NewString("testdata/README.md") diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/register.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/register.go index d9b56b98e72..76ad0dd07da 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/fake/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme/register.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme/register.go index c9f26dfab0b..8028c1e497e 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme/register.go @@ -39,14 +39,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
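// Every hunk in this stretch of the patch applies the same mechanical rewrite
// to doc comments: indented examples gain a blank "//" separator line and a
// tab indent, and "- item" continuations are re-aligned as list items, while
// the code itself is untouched. A small self-contained sketch of the resulting
// style, assuming the rewrite comes from the doc-comment formatting that gofmt
// applies as of Go 1.19 (Widget and Frobnicate are illustrative names, not
// part of this patch):

package widgetdoc

// Frobnicate bumps a widget's level, like in:
//
//	w := NewWidget()
//	Frobnicate(w)
//
// Before reformatting, the example above would typically have been indented
// with spaces and had no blank comment line before it; the newer gofmt
// rewrites it into the tab-indented form so go/doc renders it as a code block.
func Frobnicate(w *Widget) { w.level++ }

// Widget is a trivial type used only to make the example compile.
type Widget struct{ level int }

// NewWidget returns an empty Widget.
func NewWidget() *Widget { return &Widget{} }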
diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go index 139da2d4649..cef3b163ad1 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go @@ -2368,7 +2368,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Type: []string{"object"}, }, }, @@ -2379,7 +2379,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", + Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "apiVersion": { diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/register.go b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/register.go index bfaa6213fac..74013e46423 100644 --- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/fake/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme/register.go b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme/register.go index 27c9b7946fc..c3378226dd1 100644 --- a/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme/register.go +++ b/staging/src/k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/test/conformance/doc.go b/test/conformance/doc.go index f8b0d6fbee2..26e5279e0af 100644 --- a/test/conformance/doc.go +++ b/test/conformance/doc.go @@ -17,11 +17,11 @@ limitations under the License. /* This stand-alone package is utilized for dynamically generating/maintaining a list of conformance tests. It utilizes a two step approach: - - The test binary is built - - The test binary is run in dry mode with a custom ginkgo reporter dumping out - types.SpecSummary objects which contain full test names and file/code information. - - The SpecSummary information is parsed to get file/line info on Conformance tests and - then we use a simplified AST parser to grab the comments above the test. + - The test binary is built + - The test binary is run in dry mode with a custom ginkgo reporter dumping out + types.SpecSummary objects which contain full test names and file/code information. + - The SpecSummary information is parsed to get file/line info on Conformance tests and + then we use a simplified AST parser to grab the comments above the test. Due to the complicated nature of how tests can be declared/wrapped in various contexts, this approach is much simpler to maintain than a pure-AST parser and allows us to easily diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 5745550fcb3..07c677e39e3 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -203,9 +203,10 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { // This test must run [Serial] due to the impact of running other parallel // tests can have on its performance. Each test that follows the common // test framework follows this pattern: -// 1. Create a Namespace -// 2. Do work that generates content in that namespace -// 3. Delete a Namespace +// 1. Create a Namespace +// 2. Do work that generates content in that namespace +// 3. Delete a Namespace +// // Creation of a Namespace is non-trivial since it requires waiting for a // ServiceAccount to be generated. // Deletion of a Namespace is non-trivial and performance intensive since diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go index 31243cde40a..463179c4f5f 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -28,7 +28,6 @@ import ( ) // These tests don't seem to be running properly in parallel: issue: #20338. 
-// var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() { f := framework.NewDefaultFramework("horizontal-pod-autoscaling") f.NamespacePodSecurityEnforceLevel = api.LevelBaseline diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 661f6f0b80a..fe38d5bf643 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -32,8 +32,9 @@ import ( ) // TODO: Those tests should be splitted by SIG and moved to SIG-owned directories, -// however that involves also splitting the actual upgrade jobs too. -// Figure out the eventual solution for it. +// +// however that involves also splitting the actual upgrade jobs too. +// Figure out the eventual solution for it. var upgradeTests = []upgrades.Test{ &apps.DaemonSetUpgradeTest{}, &apps.DeploymentUpgradeTest{}, diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go index c8eecfe311b..57e50e1d3db 100644 --- a/test/e2e/cloud/gcp/reboot.go +++ b/test/e2e/cloud/gcp/reboot.go @@ -229,12 +229,12 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName } // rebootNode takes node name on provider through the following steps using c: -// - ensures the node is ready -// - ensures all pods on the node are running and ready -// - reboots the node (by executing rebootCmd over ssh) -// - ensures the node reaches some non-ready state -// - ensures the node becomes ready again -// - ensures all pods on the node become running and ready again +// - ensures the node is ready +// - ensures all pods on the node are running and ready +// - reboots the node (by executing rebootCmd over ssh) +// - ensures the node reaches some non-ready state +// - ensures the node becomes ready again +// - ensures all pods on the node become running and ready again // // It returns true through result only if all of the steps pass; at the first // failed step, it will return false through result and not run the rest. diff --git a/test/e2e/common/storage/host_path.go b/test/e2e/common/storage/host_path.go index 650186f5f5b..1d3526fddec 100644 --- a/test/e2e/common/storage/host_path.go +++ b/test/e2e/common/storage/host_path.go @@ -30,8 +30,8 @@ import ( "github.com/onsi/ginkgo/v2" ) -//TODO : Consolidate this code with the code for emptyDir. -//This will require some smart. +// TODO : Consolidate this code with the code for emptyDir. +// This will require some smart. var _ = SIGDescribe("HostPath", func() { f := framework.NewDefaultFramework("hostpath") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged @@ -130,8 +130,8 @@ var _ = SIGDescribe("HostPath", func() { }) }) -//These constants are borrowed from the other test. -//const volumeName = "test-volume" +// These constants are borrowed from the other test. +// const volumeName = "test-volume" const containerName1 = "test-container-1" const containerName2 = "test-container-2" @@ -146,7 +146,7 @@ func mount(source *v1.HostPathVolumeSource) []v1.Volume { } } -//TODO: To merge this with the emptyDir tests, we can make source a lambda. +// TODO: To merge this with the emptyDir tests, we can make source a lambda. 
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource, privileged bool) *v1.Pod { podName := "pod-host-path-test" diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index 118cc51d15a..eb90996daab 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -763,7 +763,7 @@ func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) { rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{}) } -//SidecarStatusType type for sidecar status +// SidecarStatusType type for sidecar status type SidecarStatusType bool const ( @@ -771,7 +771,7 @@ const ( Disable SidecarStatusType = false ) -//SidecarWorkloadType type of the sidecar +// SidecarWorkloadType type of the sidecar type SidecarWorkloadType string const ( diff --git a/test/e2e/framework/config/config.go b/test/e2e/framework/config/config.go index 356cd1c33e8..4936c1f7b5a 100644 --- a/test/e2e/framework/config/config.go +++ b/test/e2e/framework/config/config.go @@ -22,13 +22,13 @@ limitations under the License. // The command line flags all get stored in a private flag set. The // developer of the E2E test suite decides how they are exposed. Options // include: -// - exposing as normal flags in the actual command line: -// CopyFlags(Flags, flag.CommandLine) -// - populate via test/e2e/framework/viperconfig: -// viperconfig.ViperizeFlags("my-config.yaml", "", Flags) -// - a combination of both: -// CopyFlags(Flags, flag.CommandLine) -// viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine) +// - exposing as normal flags in the actual command line: +// CopyFlags(Flags, flag.CommandLine) +// - populate via test/e2e/framework/viperconfig: +// viperconfig.ViperizeFlags("my-config.yaml", "", Flags) +// - a combination of both: +// CopyFlags(Flags, flag.CommandLine) +// viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine) // // Instead of defining flags one-by-one, test developers annotate a // structure with tags and then call a single function. This is the @@ -38,16 +38,16 @@ limitations under the License. // // For example, a file storage/csi.go might define: // -// var scaling struct { -// NumNodes int `default:"1" description:"number of nodes to run on"` -// Master string -// } -// _ = config.AddOptions(&scaling, "storage.csi.scaling") +// var scaling struct { +// NumNodes int `default:"1" description:"number of nodes to run on"` +// Master string +// } +// _ = config.AddOptions(&scaling, "storage.csi.scaling") // // This defines the following command line flags: // -// -storage.csi.scaling.numNodes= - number of nodes to run on (default: 1) -// -storage.csi.scaling.master= +// -storage.csi.scaling.numNodes= - number of nodes to run on (default: 1) +// -storage.csi.scaling.master= // // All fields in the structure must be exported and have one of the following // types (same as in the `flag` package): @@ -63,10 +63,10 @@ limitations under the License. 
// // Each basic entry may have a tag with these optional keys: // -// usage: additional explanation of the option -// default: the default value, in the same format as it would -// be given on the command line and true/false for -// a boolean +// usage: additional explanation of the option +// default: the default value, in the same format as it would +// be given on the command line and true/false for +// a boolean // // The names of the final configuration options are a combination of an // optional common prefix for all options in the structure and the diff --git a/test/e2e/framework/internal/output/output.go b/test/e2e/framework/internal/output/output.go index c5121c88ffd..e167e05a0d1 100644 --- a/test/e2e/framework/internal/output/output.go +++ b/test/e2e/framework/internal/output/output.go @@ -126,10 +126,15 @@ var functionArgs = regexp.MustCompile(`([[:alpha:]]+)\(.*\)`) // testFailureOutput matches TestFailureOutput() and its source followed by additional stack entries: // // k8s.io/kubernetes/test/e2e/framework/pod/pod_test.TestFailureOutput(0xc000558800) +// // /nvme/gopath/src/k8s.io/kubernetes/test/e2e/framework/pod/wait_test.go:73 +0x1c9 +// // testing.tRunner(0xc000558800, 0x1af2848) -// /nvme/gopath/go/src/testing/testing.go:865 +0xc0 +// +// /nvme/gopath/go/src/testing/testing.go:865 +0xc0 +// // created by testing.(*T).Run +// // /nvme/gopath/go/src/testing/testing.go:916 +0x35a var testFailureOutput = regexp.MustCompile(`(?m)^k8s.io/kubernetes/test/e2e/framework/internal/output\.TestGinkgoOutput\(.*\n\t.*(\n.*\n\t.*)*`) diff --git a/test/e2e/framework/log.go b/test/e2e/framework/log.go index 5c93e4f4882..86590c902e3 100644 --- a/test/e2e/framework/log.go +++ b/test/e2e/framework/log.go @@ -72,10 +72,10 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`) // entries coming from Ginkgo. // // This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25: -// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) -// - source code filtering updated to be specific to Kubernetes -// - optimized to use bytes and in-place slice filtering from -// https://github.com/golang/go/wiki/SliceTricks#filter-in-place +// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) +// - source code filtering updated to be specific to Kubernetes +// - optimized to use bytes and in-place slice filtering from +// https://github.com/golang/go/wiki/SliceTricks#filter-in-place func PrunedStack(skip int) []byte { fullStackTrace := debug.Stack() stack := bytes.Split(fullStackTrace, []byte("\n")) diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index 0d4843e7407..5fd9f4288ac 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -282,18 +282,19 @@ func makeCURLDialCommand(ipPort, dialCmd, protocol, targetIP string, targetPort // DialFromContainer executes a curl via kubectl exec in a test container, // which might then translate to a tcp or udp request based on the protocol // argument in the url. -// - minTries is the minimum number of curl attempts required before declaring -// success. Set to 0 if you'd like to return as soon as all endpoints respond -// at least once. -// - maxTries is the maximum number of curl attempts. If this many attempts pass -// and we don't see all expected endpoints, the test fails. 
-// - targetIP is the source Pod IP that will dial the given dialCommand using the given protocol. -// - dialCommand is the command that the targetIP will send to the targetIP using the given protocol. -// the dialCommand should be formatted properly for the protocol (http: URL path+parameters, -// udp: command%20parameters, where parameters are optional) -// - expectedResponses is the unordered set of responses to wait for. The responses are based on -// the dialCommand; for example, for the dialCommand "hostname", the expectedResponses -// should contain the hostnames reported by each pod in the service through /hostName. +// - minTries is the minimum number of curl attempts required before declaring +// success. Set to 0 if you'd like to return as soon as all endpoints respond +// at least once. +// - maxTries is the maximum number of curl attempts. If this many attempts pass +// and we don't see all expected endpoints, the test fails. +// - targetIP is the source Pod IP that will dial the given dialCommand using the given protocol. +// - dialCommand is the command that the targetIP will send to the targetIP using the given protocol. +// the dialCommand should be formatted properly for the protocol (http: URL path+parameters, +// udp: command%20parameters, where parameters are optional) +// - expectedResponses is the unordered set of responses to wait for. The responses are based on +// the dialCommand; for example, for the dialCommand "hostname", the expectedResponses +// should contain the hostnames reported by each pod in the service through /hostName. +// // maxTries == minTries will confirm that we see the expected endpoints and no // more for maxTries. Use this if you want to eg: fail a readiness check on a // pod and confirm it doesn't show up as an endpoint. @@ -346,8 +347,8 @@ func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targ // GetEndpointsFromContainer executes a curl via kubectl exec in a test container, // which might then translate to a tcp or udp request based on the protocol argument // in the url. It returns all different endpoints from multiple retries. -// - tries is the number of curl attempts. If this many attempts pass and -// we don't see any endpoints, the test fails. +// - tries is the number of curl attempts. If this many attempts pass and +// we don't see any endpoints, the test fails. func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containerIP, targetIP string, containerHTTPPort, targetPort, tries int) (sets.String, error) { ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort)) cmd := makeCURLDialCommand(ipPort, "hostName", protocol, targetIP, targetPort) @@ -431,17 +432,17 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP // DialFromNode executes a tcp/udp curl/nc request based on protocol via kubectl exec // in a test container running with host networking. -// - minTries is the minimum number of curl/nc attempts required before declaring -// success. If 0, then we return as soon as all endpoints succeed. -// - There is no logical change to test results if faillures happen AFTER endpoints have succeeded, -// hence over-padding minTries will NOT reverse a successful result and is thus not very useful yet -// (See the TODO about checking probability, which isnt implemented yet). -// - maxTries is the maximum number of curl/echo attempts before an error is returned. The -// smaller this number is, the less 'slack' there is for declaring success. 
-// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit. -// - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without -// success on all endpoints). -// In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic. +// - minTries is the minimum number of curl/nc attempts required before declaring +// success. If 0, then we return as soon as all endpoints succeed. +// - There is no logical change to test results if faillures happen AFTER endpoints have succeeded, +// hence over-padding minTries will NOT reverse a successful result and is thus not very useful yet +// (See the TODO about checking probability, which isnt implemented yet). +// - maxTries is the maximum number of curl/echo attempts before an error is returned. The +// smaller this number is, the less 'slack' there is for declaring success. +// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit. +// - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without +// success on all endpoints). +// In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic. func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) error { var cmd string if protocol == "udp" { @@ -1112,13 +1113,13 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1 // slow down the test and cause it to fail if DNS is absent or broken. // // Suggested usage pattern: -// func foo() { -// ... -// defer UnblockNetwork(from, to) -// BlockNetwork(from, to) -// ... -// } // +// func foo() { +// ... +// defer UnblockNetwork(from, to) +// BlockNetwork(from, to) +// ... +// } func BlockNetwork(from string, to string) { framework.Logf("block network traffic from %s to %s", from, to) iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to) diff --git a/test/e2e/framework/providers/azure/azure.go b/test/e2e/framework/providers/azure/azure.go index 0ec9a5bfa99..7c6bb217dfa 100644 --- a/test/e2e/framework/providers/azure/azure.go +++ b/test/e2e/framework/providers/azure/azure.go @@ -51,7 +51,7 @@ func newProvider() (framework.ProviderInterface, error) { }, err } -//Provider is a structure to handle Azure clouds for e2e testing +// Provider is a structure to handle Azure clouds for e2e testing type Provider struct { framework.NullProvider diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index c1a8e5a08c1..f16cd7f0c8b 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -80,13 +80,15 @@ type pvcval struct{} // present. We must always Get the pvc object before referencing any of its values, eg. // its VolumeName. // Note: It's unsafe to add keys to a map in a loop. Their insertion in the map is -// unpredictable and can result in the same key being iterated over again. +// +// unpredictable and can result in the same key being iterated over again. type PVCMap map[types.NamespacedName]pvcval // PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object // for varying storage options (NFS, ceph, glusterFS, etc.). 
// (+optional) prebind holds a pre-bound PVC // Example pvSource: +// // pvSource: api.PersistentVolumeSource{ // NFS: &api.NFSVolumeSource{ // ... @@ -257,7 +259,8 @@ func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutCo // are deleted. Validates that the claim was deleted and the PV is in the expected Phase (Released, // Available, Bound). // Note: if there are more claims than pvs then some of the remaining claims may bind to just made -// available pvs. +// +// available pvs. func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error { var boundPVs, deletedPVCs int @@ -347,8 +350,9 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) // namespace. If the "preBind" bool is true then pre-bind the PV to the PVC // via the PV's ClaimRef. Return the pv and pvc to reflect the created objects. // Note: in the pre-bind case the real PVC name, which is generated, is not -// known until after the PVC is instantiated. This is why the pvc is created -// before the pv. +// +// known until after the PVC is instantiated. This is why the pvc is created +// before the pv. func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { // make the pvc spec pvc := MakePersistentVolumeClaim(pvcConfig, ns) @@ -382,8 +386,9 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo // via the PVC's VolumeName. Return the pv and pvc to reflect the created // objects. // Note: in the pre-bind case the real PV name, which is generated, is not -// known until after the PV is instantiated. This is why the pv is created -// before the pvc. +// +// known until after the PV is instantiated. This is why the pv is created +// before the pvc. func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) { preBindMsg := "" if preBind { @@ -417,7 +422,8 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo // entries for the resources that were successfully created. In other words, when the caller // sees an error returned, it needs to decide what to do about entries in the maps. // Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However, -// orphaned pvs are not deleted and will remain after the suite completes. +// +// orphaned pvs are not deleted and will remain after the suite completes. func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) { pvMap := make(PVMap, numpvs) pvcMap := make(PVCMap, numpvcs) @@ -504,10 +510,11 @@ func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, n // WaitAndVerifyBinds searches for bound PVs and PVCs by examining pvols for non-nil claimRefs. // NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound, -// up to 3 minutes for the PVC. 
When the number of PVs != number of PVCs, this can lead -// to situations where the maximum wait times are reached several times in succession, -// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs -// small. +// +// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead +// to situations where the maximum wait times are reached several times in succession, +// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs +// small. func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error { var actualBinds int expectedBinds := len(pvols) @@ -563,8 +570,9 @@ func makePvcKey(ns, name string) types.NamespacedName { // If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy // is assigned, assumes "Retain". Specs are expected to match the test's PVC. // Note: the passed-in claim does not have a name until it is created and thus the PV's -// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name -// is added later in CreatePVCPV. +// +// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name +// is added later in CreatePVCPV. func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume { var claimRef *v1.ObjectReference diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index b230523a099..8e87e2a6c37 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -961,10 +961,10 @@ func (j *TestJig) checkClusterIPServiceReachability(svc *v1.Service, pod *v1.Pod } // checkNodePortServiceReachability ensures that service of type nodePort are reachable -// - Internal clients should be reachable to service over - -// ServiceName:ServicePort, ClusterIP:ServicePort and NodeInternalIPs:NodePort -// - External clients should be reachable to service over - -// NodePublicIPs:NodePort +// - Internal clients should be reachable to service over - +// ServiceName:ServicePort, ClusterIP:ServicePort and NodeInternalIPs:NodePort +// - External clients should be reachable to service over - +// NodePublicIPs:NodePort func (j *TestJig) checkNodePortServiceReachability(svc *v1.Service, pod *v1.Pod) error { clusterIP := svc.Spec.ClusterIP servicePorts := svc.Spec.Ports diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index adbda516285..8e93ac8b3b8 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -55,24 +55,24 @@ const ( // into the code which uses the settings. // // The recommendation for those settings is: -// - They are stored in their own context structure or local -// variables. -// - The standard `flag` package is used to register them. -// The flag name should follow the pattern ..... -// where the prefix is unlikely to conflict with other tests or -// standard packages and each part is in lower camel case. For -// example, test/e2e/storage/csi/context.go could define -// storage.csi.numIterations. 
-// - framework/config can be used to simplify the registration of -// multiple options with a single function call: -// var storageCSI { -// NumIterations `default:"1" usage:"number of iterations"` -// } -// _ config.AddOptions(&storageCSI, "storage.csi") -// - The direct use Viper in tests is possible, but discouraged because -// it only works in test suites which use Viper (which is not -// required) and the supported options cannot be -// discovered by a test suite user. +// - They are stored in their own context structure or local +// variables. +// - The standard `flag` package is used to register them. +// The flag name should follow the pattern ..... +// where the prefix is unlikely to conflict with other tests or +// standard packages and each part is in lower camel case. For +// example, test/e2e/storage/csi/context.go could define +// storage.csi.numIterations. +// - framework/config can be used to simplify the registration of +// multiple options with a single function call: +// var storageCSI { +// NumIterations `default:"1" usage:"number of iterations"` +// } +// _ config.AddOptions(&storageCSI, "storage.csi") +// - The direct use Viper in tests is possible, but discouraged because +// it only works in test suites which use Viper (which is not +// required) and the supported options cannot be +// discovered by a test suite user. // // Test suite authors can use framework/viper to make all command line // parameters also configurable via a configuration file. diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 3429ec2ba1a..1274e12396d 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -438,7 +438,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out") } -//WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. +// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum. func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) @@ -1374,18 +1374,22 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool { // WatchEventSequenceVerifier ... 
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure -// testContext cancelation signal across API boundries, e.g: context.TODO() -// dc sets up a client to the API -// resourceType specify the type of resource -// namespace select a namespace -// resourceName the name of the given resource -// listOptions options used to find the resource, recommended to use listOptions.labelSelector -// expectedWatchEvents array of events which are expected to occur -// scenario the test itself -// retryCleanup a function to run which ensures that there are no dangling resources upon test failure +// +// testContext cancelation signal across API boundries, e.g: context.TODO() +// dc sets up a client to the API +// resourceType specify the type of resource +// namespace select a namespace +// resourceName the name of the given resource +// listOptions options used to find the resource, recommended to use listOptions.labelSelector +// expectedWatchEvents array of events which are expected to occur +// scenario the test itself +// retryCleanup a function to run which ensures that there are no dangling resources upon test failure +// // this tooling relies on the test to return the events as they occur // the entire scenario must be run to ensure that the desired watch events arrive in order (allowing for interweaving of watch events) -// if an expected watch event is missing we elect to clean up and run the entire scenario again +// +// if an expected watch event is missing we elect to clean up and run the entire scenario again +// // we try the scenario three times to allow the sequencing to fail a couple of times func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) { listWatcher := &cache.ListWatch{ diff --git a/test/e2e/network/netpol/kubemanager.go b/test/e2e/network/netpol/kubemanager.go index 3121f6386a5..cacd0c2e9a3 100644 --- a/test/e2e/network/netpol/kubemanager.go +++ b/test/e2e/network/netpol/kubemanager.go @@ -47,8 +47,8 @@ type probeConnectivityArgs struct { // kubeManager provides a convenience interface to kube functionality that we leverage for polling NetworkPolicy connections. // Its responsibilities are: -// - creating resources (pods, deployments, namespaces, services, network policies) -// - modifying and cleaning up resources +// - creating resources (pods, deployments, namespaces, services, network policies) +// - modifying and cleaning up resources type kubeManager struct { framework *framework.Framework clientSet clientset.Interface diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index 3cb9e54d6b7..c6f2a7c8844 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -124,17 +124,18 @@ func iperf2ClientDaemonSet(client clientset.Interface, namespace string) (*appsv } // Test summary: -// This test uses iperf2 to obtain bandwidth data between nodes in the cluster, providing a coarse measure -// of the health of the cluster network. The test runs two sets of pods: -// 1. an iperf2 server on a single node -// 2. 
a daemonset of iperf2 clients -// The test then iterates through the clients, one by one, running iperf2 from each of them to transfer -// data to the server and back for ten seconds, after which the results are collected and parsed. -// Thus, if your cluster has 10 nodes, then 10 test runs are performed. -// Note: a more complete test could run this scenario with a daemonset of servers as well; however, this -// would require n^2 tests, n^2 time, and n^2 network resources which quickly become prohibitively large -// as the cluster size increases. -// Finally, after collecting all data, the results are analyzed and tabulated. +// +// This test uses iperf2 to obtain bandwidth data between nodes in the cluster, providing a coarse measure +// of the health of the cluster network. The test runs two sets of pods: +// 1. an iperf2 server on a single node +// 2. a daemonset of iperf2 clients +// The test then iterates through the clients, one by one, running iperf2 from each of them to transfer +// data to the server and back for ten seconds, after which the results are collected and parsed. +// Thus, if your cluster has 10 nodes, then 10 test runs are performed. +// Note: a more complete test could run this scenario with a daemonset of servers as well; however, this +// would require n^2 tests, n^2 time, and n^2 network resources which quickly become prohibitively large +// as the cluster size increases. +// Finally, after collecting all data, the results are analyzed and tabulated. var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", func() { // this test runs iperf2: one pod as a server, and a daemonset of clients f := framework.NewDefaultFramework("network-perf") diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index 4f9097dc1d2..d25daa1b72b 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -227,8 +227,8 @@ func newQuerier() *endpointQueries { // join merges the incoming streams of requests and added endpoints. It has // nice properties like: -// * remembering an endpoint if it happens to arrive before it is requested. -// * closing all outstanding requests (returning nil) if it is stopped. +// - remembering an endpoint if it happens to arrive before it is requested. +// - closing all outstanding requests (returning nil) if it is stopped. func (eq *endpointQueries) join() { defer func() { // Terminate all pending requests, so that no goroutine will diff --git a/test/e2e/storage/framework/driver_operations.go b/test/e2e/storage/framework/driver_operations.go index c1ca2f20009..b3326a7fd44 100644 --- a/test/e2e/storage/framework/driver_operations.go +++ b/test/e2e/storage/framework/driver_operations.go @@ -27,8 +27,8 @@ import ( // GetDriverNameWithFeatureTags returns driver name with feature tags // For example) -// - [Driver: nfs] -// - [Driver: rbd][Feature:Volumes] +// - [Driver: nfs] +// - [Driver: rbd][Feature:Volumes] func GetDriverNameWithFeatureTags(driver TestDriver) string { dInfo := driver.GetDriverInfo() diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 125cd521671..c3276656d3f 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -60,7 +60,8 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv * // PV. Ensure each step succeeds. // Note: the PV is deleted in the AfterEach, not here. 
// Note: this func is serialized, we wait for each pod to be deleted before creating the -// next pod. Adding concurrency is a TODO item. +// +// next pod. Adding concurrency is a TODO item. func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols e2epv.PVMap, claims e2epv.PVCMap, expectPhase v1.PersistentVolumePhase) error { var err error @@ -442,7 +443,8 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v // createWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod. // Note: need named return value so that the err assignment in the defer sets the returned error. -// Has been shown to be necessary using Go 1.7. +// +// Has been shown to be necessary using Go 1.7. func createWaitAndDeletePod(c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) { framework.Logf("Creating nfs test pod") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command) diff --git a/test/e2e/storage/podlogs/podlogs.go b/test/e2e/storage/podlogs/podlogs.go index 7ab3bf85d46..692078b8362 100644 --- a/test/e2e/storage/podlogs/podlogs.go +++ b/test/e2e/storage/podlogs/podlogs.go @@ -258,7 +258,9 @@ func CopyPodLogs(ctx context.Context, cs clientset.Interface, ns, podName string // logsForPod starts reading the logs for a certain pod. If the pod has more than one // container, opts.Container must be set. Reading stops when the context is done. // The stream includes formatted error messages and ends with -// rpc error: code = Unknown desc = Error: No such container: 41a... +// +// rpc error: code = Unknown desc = Error: No such container: 41a... +// // when the pod gets deleted while streaming. func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) { return cs.CoreV1().Pods(ns).GetLogs(pod, opts).Stream(ctx) diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 51212812a31..6045a64d577 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -310,7 +310,8 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) { // Note: the file name is appended to "/opt//", eg. "/opt/nfs/e2e-.../". // Note: nil can be passed for the podSecContext parm, in which case it is ignored. // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` -// bytes. +// +// bytes. 
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 5ec4e27856b..d5a09ba2c0d 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -474,8 +474,9 @@ func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod { // listPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods//volumes/* and // /var/lib/kubelet/pods//volumeDevices/* // Sample output: -// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt -// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0 +// +// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt +// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0 func listPodVolumePluginDirectory(h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) { mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes") devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices") diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index 84f67170ebd..46d823d8482 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -48,11 +48,11 @@ import ( // or be built into the binary. // // LoadFromManifests has some limitations: -// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount, -// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core) -// and silently ignored -// - the latest stable API version for each item is used, regardless of what -// is specified in the manifest files +// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount, +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core) +// and silently ignored +// - the latest stable API version for each item is used, regardless of what +// is specified in the manifest files func LoadFromManifests(files ...string) ([]interface{}, error) { var items []interface{} err := visitManifests(func(data []byte) error { @@ -132,10 +132,10 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ... // It returns either a cleanup function or an error, but never both. 
// // Cleaning up after a test can be triggered in two ways: -// - the test invokes the returned cleanup function, -// usually in an AfterEach -// - the test suite terminates, potentially after -// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222) +// - the test invokes the returned cleanup function, +// usually in an AfterEach +// - the test suite terminates, potentially after +// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222) // // PatchItems has the some limitations as LoadFromManifests: // - only some common items are supported, unknown ones trigger an error diff --git a/test/e2e/storage/utils/deployment.go b/test/e2e/storage/utils/deployment.go index 0fee58222e0..e828e2b91e3 100644 --- a/test/e2e/storage/utils/deployment.go +++ b/test/e2e/storage/utils/deployment.go @@ -33,18 +33,18 @@ import ( // // All of that is optional, see PatchCSIOptions. Just beware // that not renaming the CSI driver deployment can be problematic: -// - when multiple tests deploy the driver, they need -// to run sequentially -// - might conflict with manual deployments +// - when multiple tests deploy the driver, they need +// to run sequentially +// - might conflict with manual deployments // // This function is written so that it works for CSI driver deployments // that follow these conventions: -// - driver and provisioner names are identical -// - the driver binary accepts a --drivername parameter -// - the paths inside the container are either fixed -// and don't need to be patch (for example, --csi-address=/csi/csi.sock is -// okay) or are specified directly in a parameter (for example, -// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock) +// - driver and provisioner names are identical +// - the driver binary accepts a --drivername parameter +// - the paths inside the container are either fixed +// and don't need to be patch (for example, --csi-address=/csi/csi.sock is +// okay) or are specified directly in a parameter (for example, +// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock) // // Driver deployments that are different will have to do the patching // without this function, or skip patching entirely. diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 7b019fae9d2..0ae2f2c8bc7 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -32,21 +32,20 @@ import ( ) /* - This is a function test for Selector-Label Volume Binding Feature - Test verifies volume with the matching label is bounded with the PVC. - - Test Steps - ---------- - 1. Create VMDK. - 2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete. - 3. Create PVC (pvcVvol) with label selector to match with volume-type:vvol - 4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd - 5. Wait and verify pvSsd is bound with PV. - 6. Verify Status of pvcVvol is still pending. - 7. Delete pvcSsd. - 8. verify associated pv is also deleted. - 9. delete pvcVvol +This is a function test for Selector-Label Volume Binding Feature +Test verifies volume with the matching label is bounded with the PVC. +Test Steps +---------- +1. Create VMDK. +2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete. +3. 
Create PVC (pvcVvol) with label selector to match with volume-type:vvol +4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd +5. Wait and verify pvSsd is bound with PV. +6. Verify Status of pvcVvol is still pending. +7. Delete pvcSsd. +8. verify associated pv is also deleted. +9. delete pvcVvol */ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSelector]", func() { f := framework.NewDefaultFramework("pvclabelselector") diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index b209f76a5d6..30101a6166c 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -37,14 +37,14 @@ import ( ) /* - Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes. - The following actions will be performed as part of this test. +Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes. +The following actions will be performed as part of this test. - 1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.) - 2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment. - 3. Launch VCP_SCALE_INSTANCES goroutine for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for create/attach of VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes. - 4. Read VCP_SCALE_VOLUMES_PER_POD from System Environment. Each pod will be have VCP_SCALE_VOLUMES_PER_POD attached to it. - 5. Once all the go routines are completed, we delete all the pods and volumes. +1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.) +2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment. +3. Launch VCP_SCALE_INSTANCES goroutine for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for create/attach of VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes. +4. Read VCP_SCALE_VOLUMES_PER_POD from System Environment. Each pod will be have VCP_SCALE_VOLUMES_PER_POD attached to it. +5. Once all the go routines are completed, we delete all the pods and volumes. */ const ( NodeLabelKey = "vsphere_e2e_label" diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index cb43337211b..3eb9065081c 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -37,14 +37,14 @@ import ( ) /* - Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread. - The following actions will be performed as part of this test. +Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread. +The following actions will be performed as part of this test. - 1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.) - 2. READ VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from System Environment. - 3. Launch goroutine for volume lifecycle operations. - 4. 
Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS - 5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC. +1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.) +2. READ VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from System Environment. +3. Launch goroutine for volume lifecycle operations. +4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS +5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC. */ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() { f := framework.NewDefaultFramework("vcp-stress") diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index cd8b75dec68..6c539dc1cdd 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -477,7 +477,7 @@ func getPathFromVMDiskPath(vmDiskPath string) string { return datastorePathObj.Path } -//getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. +// getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) @@ -513,7 +513,7 @@ func formatVirtualDiskUUID(uuid string) string { return strings.ToLower(uuidWithNoHypens) } -//isValidUUID checks if the string is a valid UUID. +// isValidUUID checks if the string is a valid UUID. func isValidUUID(uuid string) bool { r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") return r.MatchString(uuid) diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index a82f474fc57..7393b047b0b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -30,14 +30,14 @@ import ( ) /* - Tests to verify volume provisioning on a clustered datastore - 1. Static provisioning - 2. Dynamic provisioning - 3. Dynamic provisioning with spbm policy +Tests to verify volume provisioning on a clustered datastore +1. Static provisioning +2. Dynamic provisioning +3. Dynamic provisioning with spbm policy - This test reads env - 1. CLUSTER_DATASTORE which should be set to clustered datastore - 2. VSPHERE_SPBM_POLICY_DS_CLUSTER which should be set to a tag based spbm policy tagged to a clustered datastore +This test reads env +1. CLUSTER_DATASTORE which should be set to clustered datastore +2. 
VSPHERE_SPBM_POLICY_DS_CLUSTER which should be set to a tag based spbm policy tagged to a clustered datastore */ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() { f := framework.NewDefaultFramework("volume-provision") diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 01561d5be30..7243dd8c298 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -90,15 +90,15 @@ func restartKubelet(host string) error { } /* - Test to verify volume remains attached after kubelet restart on master node - For the number of schedulable nodes, - 1. Create a volume with default volume options - 2. Create a Pod - 3. Verify the volume is attached - 4. Restart the kubelet on master node - 5. Verify again that the volume is attached - 6. Delete the pod and wait for the volume to be detached - 7. Delete the volume +Test to verify volume remains attached after kubelet restart on master node +For the number of schedulable nodes, +1. Create a volume with default volume options +2. Create a Pod +3. Verify the volume is attached +4. Restart the kubelet on master node +5. Verify again that the volume is attached +6. Delete the pod and wait for the volume to be detached +7. Delete the volume */ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() { f := framework.NewDefaultFramework("restart-master") diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 0a95c466488..6037a3e6e56 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -41,9 +41,9 @@ import ( ) /* - Test to verify volume status after node power off: - 1. Verify the pod got provisioned on a different node with volume attached to it - 2. Verify the volume is detached from the powered off node +Test to verify volume status after node power off: +1. Verify the pod got provisioned on a different node with volume attached to it +2. Verify the volume is detached from the powered off node */ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() { f := framework.NewDefaultFramework("node-poweroff") diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index c2adc27cfc6..31fbb064fbe 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -36,7 +36,9 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -/* This test calculates latency numbers for volume lifecycle operations +/* + This test calculates latency numbers for volume lifecycle operations + 1. Create 4 type of storage classes 2. Read the total number of volumes to be created and volumes per pod 3. Create total PVCs (number of volumes) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 892a10e6643..a77273706ce 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -38,19 +38,19 @@ import ( ) /* - Test to verify that a volume remains attached through vpxd restart. +Test to verify that a volume remains attached through vpxd restart. - For the number of schedulable nodes: - 1. 
Create a Volume with default options. - 2. Create a Pod with the created Volume. - 3. Verify that the Volume is attached. - 4. Create a file with random contents under the Volume's mount point on the Pod. - 5. Stop the vpxd service on the vCenter host. - 6. Verify that the file is accessible on the Pod and that it's contents match. - 7. Start the vpxd service on the vCenter host. - 8. Verify that the Volume remains attached, the file is accessible on the Pod, and that it's contents match. - 9. Delete the Pod and wait for the Volume to be detached. - 10. Delete the Volume. +For the number of schedulable nodes: +1. Create a Volume with default options. +2. Create a Pod with the created Volume. +3. Verify that the Volume is attached. +4. Create a file with random contents under the Volume's mount point on the Pod. +5. Stop the vpxd service on the vCenter host. +6. Verify that the file is accessible on the Pod and that it's contents match. +7. Start the vpxd service on the vCenter host. +8. Verify that the Volume remains attached, the file is accessible on the Pod, and that it's contents match. +9. Delete the Pod and wait for the Volume to be detached. +10. Delete the Volume. */ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive]", func() { f := framework.NewDefaultFramework("restart-vpxd") diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 6b948b3bf2d..e26ba1062ab 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -511,10 +511,12 @@ type podEvictSpec struct { } // runEvictionTest sets up a testing environment given the provided pods, and checks a few things: -// It ensures that the desired expectedNodeCondition is actually triggered. -// It ensures that evictionPriority 0 pods are not evicted -// It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.) -// It ensures that all pods with non-zero evictionPriority are eventually evicted. +// +// It ensures that the desired expectedNodeCondition is actually triggered. +// It ensures that evictionPriority 0 pods are not evicted +// It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.) +// It ensures that all pods with non-zero evictionPriority are eventually evicted. +// // runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) { // Place the remainder of the test within a context so that the kubelet config is set before and after the test. 
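To make the ordering guarantee described for runEvictionTest concrete, the following is a minimal, self-contained sketch under assumed names (podEvictSpec-style struct, checkEvictionOrder helper); it is illustrative only and not the code in eviction_test.go, which drives the check through the e2e framework's polling helpers.

```go
// Illustrative sketch only: hypothetical names, not the actual eviction_test.go code.
package main

import (
	"errors"
	"fmt"
)

// podEvictSpec is a stand-in for the struct of the same name above:
// evictionPriority 0 means the pod must survive, and higher numbers must be
// evicted earlier (2 before 1, and so on).
type podEvictSpec struct {
	name             string
	evictionPriority int
}

// checkEvictionOrder validates an observed eviction sequence against the
// invariants spelled out above: no priority-0 pod is evicted, the priority
// numbers never increase along the sequence, and every pod with a non-zero
// priority eventually shows up.
func checkEvictionOrder(specs []podEvictSpec, evicted []string) error {
	prio := make(map[string]int, len(specs))
	mustEvict := make(map[string]bool)
	for _, s := range specs {
		prio[s.name] = s.evictionPriority
		if s.evictionPriority > 0 {
			mustEvict[s.name] = true
		}
	}
	prev := int(^uint(0) >> 1) // max int: the first eviction may have any priority
	for _, name := range evicted {
		p, ok := prio[name]
		if !ok {
			return fmt.Errorf("unknown pod %q evicted", name)
		}
		if p == 0 {
			return fmt.Errorf("pod %q has evictionPriority 0 and must not be evicted", name)
		}
		if p > prev {
			return fmt.Errorf("pod %q (priority %d) evicted after a priority-%d pod", name, p, prev)
		}
		prev = p
		delete(mustEvict, name)
	}
	if len(mustEvict) > 0 {
		return errors.New("some pods with non-zero evictionPriority were never evicted")
	}
	return nil
}

func main() {
	specs := []podEvictSpec{{"best-effort", 2}, {"burstable", 1}, {"guaranteed", 0}}
	fmt.Println(checkEvictionOrder(specs, []string{"best-effort", "burstable"})) // <nil>
	fmt.Println(checkEvictionOrder(specs, []string{"burstable", "best-effort"})) // ordering error
}
```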
diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 6b15d508c91..6fb340164ea 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -137,11 +137,12 @@ var _ = SIGDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func( }) // Tests the following: -// pods are created, and all containers restart the specified number of times -// while containers are running, the number of copies of a single container does not exceed maxPerPodContainer -// while containers are running, the total number of containers does not exceed maxTotalContainers -// while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container -// once pods are killed, all containers are eventually cleaned up +// +// pods are created, and all containers restart the specified number of times +// while containers are running, the number of copies of a single container does not exceed maxPerPodContainer +// while containers are running, the total number of containers does not exceed maxTotalContainers +// while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container +// once pods are killed, all containers are eventually cleaned up func containerGCTest(f *framework.Framework, test testRun) { var runtime internalapi.RuntimeService ginkgo.BeforeEach(func() { diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index b3af2d47ed9..e7b17dcde5e 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -263,7 +263,7 @@ func getSRIOVDevicePluginImage() (string, error) { return ds.Spec.Template.Spec.Containers[0].Image, nil } -//TODO generilize this function with above one +// TODO generilize this function with above one // getKubeVirtDevicePluginImage returns the image of SRIOV device plugin. func getKubeVirtDevicePluginImage() (string, error) { data, err := e2etestfiles.Read(KubeVirtDevicePluginDSYAML) diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go index fa029331df5..5d62365421e 100644 --- a/test/e2e_node/memory_manager_test.go +++ b/test/e2e_node/memory_manager_test.go @@ -65,7 +65,7 @@ type memoryManagerCtnAttributes struct { hugepages2Mi string } -// makeMemoryManagerContainers returns slice of containers with provided attributes and indicator of hugepages mount needed for those. +// makeMemoryManagerContainers returns slice of containers with provided attributes and indicator of hugepages mount needed for those. 
func makeMemoryManagerContainers(ctnCmd string, ctnAttributes []memoryManagerCtnAttributes) ([]v1.Container, bool) { hugepagesMount := false var containers []v1.Container diff --git a/test/e2e_node/runner/remote/run_remote.go b/test/e2e_node/runner/remote/run_remote.go index c82923b5b32..c40ee168734 100644 --- a/test/e2e_node/runner/remote/run_remote.go +++ b/test/e2e_node/runner/remote/run_remote.go @@ -130,12 +130,13 @@ type TestResult struct { // specifying the `--image-config-file` flag, pointing to a json or yaml file // of the form: // -// images: -// short-name: -// image: gce-image-name -// project: gce-image-project -// machine: for benchmark only, the machine type (GCE instance) to run test -// tests: for benchmark only, a list of ginkgo focus strings to match tests +// images: +// short-name: +// image: gce-image-name +// project: gce-image-project +// machine: for benchmark only, the machine type (GCE instance) to run test +// tests: for benchmark only, a list of ginkgo focus strings to match tests +// // TODO(coufon): replace 'image' with 'node' in configurations // and we plan to support testing custom machines other than GCE by specifying host type ImageConfig struct { diff --git a/test/images/agnhost/net/nat/closewait.go b/test/images/agnhost/net/nat/closewait.go index 5a344956e5c..5326ddca00b 100644 --- a/test/images/agnhost/net/nat/closewait.go +++ b/test/images/agnhost/net/nat/closewait.go @@ -38,6 +38,7 @@ import ( // leakedConnection is a global variable that should leak the active // connection assigned here. +// //nolint:unused // U1000 intentional unused variable var leakedConnection *net.TCPConn diff --git a/test/images/agnhost/nettest/nettest.go b/test/images/agnhost/nettest/nettest.go index 600dd77feff..434fc51d11f 100644 --- a/test/images/agnhost/nettest/nettest.go +++ b/test/images/agnhost/nettest/nettest.go @@ -314,7 +314,7 @@ func contactOthers(state *State) { } } -//getWebserverEndpoints returns the webserver endpoints as a set of String, each in the format like "http://{ip}:{port}" +// getWebserverEndpoints returns the webserver endpoints as a set of String, each in the format like "http://{ip}:{port}" func getWebserverEndpoints(client clientset.Interface) sets.String { endpoints, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), service, v1.GetOptions{}) eps := sets.String{} diff --git a/test/images/apparmor-loader/loader.go b/test/images/apparmor-loader/loader.go index a6cf0d4ed56..4cb3ed35876 100644 --- a/test/images/apparmor-loader/loader.go +++ b/test/images/apparmor-loader/loader.go @@ -229,7 +229,8 @@ func resolveSymlink(basePath string, entry os.DirEntry) (os.FileInfo, error) { } // TODO: This is copied from k8s.io/kubernetes/pkg/security/apparmor.getLoadedProfiles. -// Refactor that method to expose it in a reusable way, and delete this version. +// +// Refactor that method to expose it in a reusable way, and delete this version. func getLoadedProfiles() (map[string]bool, error) { profilesPath := path.Join(apparmorfs, "profiles") profilesFile, err := os.Open(profilesPath) @@ -252,8 +253,10 @@ func getLoadedProfiles() (map[string]bool, error) { } // The profiles file is formatted with one profile per line, matching a form: -// namespace://profile-name (mode) -// profile-name (mode) +// +// namespace://profile-name (mode) +// profile-name (mode) +// // Where mode is {enforce, complain, kill}. The "namespace://" is only included for namespaced // profiles. 
For the purposes of Kubernetes, we consider the namespace part of the profile name. func parseProfileName(profileLine string) string { diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index eee81848208..143f05f406b 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -1716,7 +1716,8 @@ func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndp } // localhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw diff --git a/test/integration/apiserver/flowcontrol/fight_test.go b/test/integration/apiserver/flowcontrol/fight_test.go index 84c0ad71ad4..e4c9645b75c 100644 --- a/test/integration/apiserver/flowcontrol/fight_test.go +++ b/test/integration/apiserver/flowcontrol/fight_test.go @@ -40,22 +40,24 @@ import ( testclocks "k8s.io/utils/clock/testing" ) -/* fightTest configures a test of how API Priority and Fairness config - controllers fight when they disagree on how to set FlowSchemaStatus. - In particular, they set the condition that indicates integrity of - the reference to the PriorityLevelConfiguration. The scenario tested is - two teams of controllers, where the controllers in one team set the - condition normally and the controllers in the other team set the condition - to the opposite value. +/* +fightTest configures a test of how API Priority and Fairness config - This is a behavioral test: it instantiates these controllers and runs them - almost normally. The test aims to run the controllers for a little under - 2 minutes. The test takes clock readings to get upper and lower bounds on - how long each controller ran, and calculates consequent bounds on the number - of writes that should happen to each FlowSchemaStatus. The test creates - an informer to observe the writes. The calculated lower bound on the - number of writes is very lax, assuming only that one write can be done - every 10 seconds. + controllers fight when they disagree on how to set FlowSchemaStatus. + In particular, they set the condition that indicates integrity of + the reference to the PriorityLevelConfiguration. The scenario tested is + two teams of controllers, where the controllers in one team set the + condition normally and the controllers in the other team set the condition + to the opposite value. + + This is a behavioral test: it instantiates these controllers and runs them + almost normally. The test aims to run the controllers for a little under + 2 minutes. The test takes clock readings to get upper and lower bounds on + how long each controller ran, and calculates consequent bounds on the number + of writes that should happen to each FlowSchemaStatus. The test creates + an informer to observe the writes. The calculated lower bound on the + number of writes is very lax, assuming only that one write can be done + every 10 seconds. 
*/ type fightTest struct { t *testing.T diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index f8885d84da3..ee6c94636c8 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -50,11 +50,14 @@ const ( ) // 1.2 code gets: -// quota_test.go:95: Took 4.218619579s to scale up without quota -// quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute) +// +// quota_test.go:95: Took 4.218619579s to scale up without quota +// quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute) +// // 1.3+ code gets: -// quota_test.go:100: Took 4.196205966s to scale up without quota -// quota_test.go:115: Took 12.021640372s to scale up with quota +// +// quota_test.go:100: Took 4.196205966s to scale up without quota +// quota_test.go:115: Took 12.021640372s to scale up with quota func TestQuota(t *testing.T) { // Set up a API server _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ diff --git a/test/integration/scheduler/plugins/plugins_test.go b/test/integration/scheduler/plugins/plugins_test.go index 57c49135d2f..da4d87ffe21 100644 --- a/test/integration/scheduler/plugins/plugins_test.go +++ b/test/integration/scheduler/plugins/plugins_test.go @@ -2092,6 +2092,7 @@ func TestPreScorePlugin(t *testing.T) { // - when waitingPods get preempted // - they should be removed from internal waitingPods map, but not physically deleted // - it'd trigger moving unschedulable Pods, but not the waitingPods themselves +// // - when waitingPods get deleted externally, it'd trigger moving unschedulable Pods func TestPreemptWithPermitPlugin(t *testing.T) { // Create a plugin registry for testing. Register a permit and a filter plugin. @@ -2342,9 +2343,9 @@ func (j *JobPlugin) PostBind(_ context.Context, state *framework.CycleState, p * } // This test simulates a typical spark job workflow. -// - N executor pods are created, but kept pending due to missing the driver pod -// - when the driver pod gets created and scheduled, proactively move the executors to activeQ -// and thus accelerate the entire job workflow. +// - N executor pods are created, but kept pending due to missing the driver pod +// - when the driver pod gets created and scheduled, proactively move the executors to activeQ +// and thus accelerate the entire job workflow. func TestActivatePods(t *testing.T) { var jobPlugin *JobPlugin // Create a plugin registry for testing. Register a Job plugin. diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go index 125edfe0ab9..e141c227366 100644 --- a/test/integration/scheduler_perf/scheduler_perf_test.go +++ b/test/integration/scheduler_perf/scheduler_perf_test.go @@ -147,12 +147,14 @@ type params struct { // UnmarshalJSON is a custom unmarshaler for params. 
// // from(json): -// { -// "initNodes": 500, -// "initPods": 50 -// } +// +// { +// "initNodes": 500, +// "initPods": 50 +// } // // to: +// // params{ // params: map[string]int{ // "intNodes": 500, @@ -160,7 +162,6 @@ type params struct { // }, // isUsed: map[string]bool{}, // empty map // } -// func (p *params) UnmarshalJSON(b []byte) error { aux := map[string]int{} diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 60f8c408d44..9aff06cee07 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -47,14 +47,18 @@ import ( // Several tests in this file are configurable by environment variables: // KUBE_INTEGRATION_PV_OBJECTS - nr. of PVs/PVCs to be created -// (100 by default) -// KUBE_INTEGRATION_PV_SYNC_PERIOD - volume controller sync period -// (1s by default) -// KUBE_INTEGRATION_PV_END_SLEEP - for how long should -// TestPersistentVolumeMultiPVsPVCs sleep when it's finished (0s by -// default). This is useful to test how long does it take for periodic sync -// to process bound PVs/PVCs. // +// (100 by default) +// +// KUBE_INTEGRATION_PV_SYNC_PERIOD - volume controller sync period +// +// (1s by default) +// +// KUBE_INTEGRATION_PV_END_SLEEP - for how long should +// +// TestPersistentVolumeMultiPVsPVCs sleep when it's finished (0s by +// default). This is useful to test how long does it take for periodic sync +// to process bound PVs/PVCs. const defaultObjectCount = 100 const defaultSyncPeriod = 1 * time.Second diff --git a/test/utils/admission_webhook.go b/test/utils/admission_webhook.go index dbec166d2e7..5114e9951ac 100644 --- a/test/utils/admission_webhook.go +++ b/test/utils/admission_webhook.go @@ -86,7 +86,8 @@ func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview) } // LocalhostCert was generated from crypto/tls/generate_cert.go with the following command: -// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +// +// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var LocalhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw diff --git a/test/utils/harness/harness.go b/test/utils/harness/harness.go index 6832164aefe..691d6cd0d48 100644 --- a/test/utils/harness/harness.go +++ b/test/utils/harness/harness.go @@ -28,11 +28,13 @@ import ( // // Example usage: // ``` -// func MyTest(tt *testing.T) { -// t := harness.For(tt) -// defer t.Close() -// ... -// } +// +// func MyTest(tt *testing.T) { +// t := harness.For(tt) +// defer t.Close() +// ... +// } +// // ``` type Harness struct { *testing.T
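The wrapper pattern shown in the Harness doc comment above can be sketched in isolation as follows; this is a stand-in under assumed names (forT, AddCleanup) rather than the actual test/utils/harness implementation, which carries its own fields and helpers.

```go
// harness_sketch_test.go: illustrative sketch only, not the real test/utils/harness API.
package harnesssketch

import "testing"

// harness embeds *testing.T so the usual test helpers keep working, and
// tracks cleanup functions to run when the test calls Close.
type harness struct {
	*testing.T
	cleanups []func()
}

// forT wraps a *testing.T, mirroring the harness.For(tt) call in the doc comment.
func forT(t *testing.T) *harness { return &harness{T: t} }

// AddCleanup registers fn to run (last in, first out) when Close is called.
func (h *harness) AddCleanup(fn func()) { h.cleanups = append(h.cleanups, fn) }

// Close runs the registered cleanups; callers defer it right after forT.
func (h *harness) Close() {
	for i := len(h.cleanups) - 1; i >= 0; i-- {
		h.cleanups[i]()
	}
}

// TestHarnessSketch follows the usage pattern from the doc comment: wrap the
// *testing.T, defer Close, then use the wrapper like a normal test object.
func TestHarnessSketch(t *testing.T) {
	h := forT(t)
	defer h.Close()
	h.AddCleanup(func() { h.Log("cleanup ran") })
	h.Log("test body runs here")
}
```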