Merge pull request #35523 from luxas/remove_reconcile
Automatic merge from submit-queue

Deprecate the --reconcile-cidr flag

**What this PR does / why we need it**: Follows up https://github.com/kubernetes/kubernetes/pull/34906

**Special notes for your reviewer**: I'm not sure why CoreOS had set `--reconcile-cidr` to `false` and what the implications are now.

**Release note**:

```release-note
Deprecate the --reconcile-cidr kubelet flag because it has no function anymore
```

PTAL @thockin @freehan @justinsb @yujuhong @kubernetes/sig-node @kubernetes/sig-network

**I will add `hack/update-all.sh` contents soon to fix builds**
commit 10061ac358
```diff
@@ -123,8 +123,7 @@ coreos:
 --logtostderr=true \
 --container-runtime=docker \
 --pod-cidr=${MASTER_IP_RANGE} \
---register-schedulable=false \
---reconcile-cidr=false
+--register-schedulable=false
 Restart=always
 RestartSec=10
 KillMode=process
```
```diff
@@ -183,8 +183,7 @@ coreos:
 --rkt-path=/opt/rkt/rkt \
 --rkt-stage1-image=${RKT_STAGE_IMAGE} \
 --pod-cidr=${MASTER_IP_RANGE} \
---register-schedulable=false \
---reconcile-cidr=false
+--register-schedulable=false
 Restart=always
 RestartSec=10
 KillMode=process
```
```diff
@@ -102,8 +102,7 @@ coreos:
 --cluster-domain=${DNS_DOMAIN} \
 --logtostderr=true \
 --container-runtime=docker \
---network-plugin=${NETWORK_PROVIDER} \
---reconcile-cidr=true
+--network-plugin=${NETWORK_PROVIDER}
 Restart=always
 RestartSec=10
 KillMode=process
```
```diff
@@ -148,8 +148,7 @@ coreos:
 --container-runtime=rkt \
 --rkt-path=/opt/rkt/rkt \
 --rkt-stage1-image=${RKT_STAGE1_IMAGE} \
---network-plugin=kubenet \
---reconcile-cidr=true
+--network-plugin=kubenet
 Restart=always
 RestartSec=10
 KillMode=process
```
```diff
@@ -479,7 +479,6 @@ function start-kubelet {
   if [[ -n "${KUBELET_PORT:-}" ]]; then
     flags+=" --port=${KUBELET_PORT}"
   fi
-  local reconcile_cidr="true"
   if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
     flags+=" --enable-debugging-handlers=false"
     flags+=" --hairpin-mode=none"
```
```diff
@@ -490,7 +489,6 @@ function start-kubelet {
     # TODO: determine if we still allow non-hostnetwork pods to run on master, clean up master pod setup
     # WARNING: potential ip range collision with 10.123.45.0/29
     flags+=" --pod-cidr=10.123.45.0/29"
-    reconcile_cidr="false"
   else
     flags+=" --pod-cidr=${MASTER_IP_RANGE}"
   fi
```
```diff
@@ -512,7 +510,6 @@ function start-kubelet {
     fi
     flags+=" --network-plugin=${NETWORK_PROVIDER}"
   fi
-  flags+=" --reconcile-cidr=${reconcile_cidr}"
   if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
     flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
   fi
```
|
@ -155,7 +155,7 @@ assemble_kubelet_flags() {
|
|||||||
if [ ! -z "${KUBELET_APISERVER:-}" ] && \
|
if [ ! -z "${KUBELET_APISERVER:-}" ] && \
|
||||||
[ ! -z "${KUBELET_CERT:-}" ] && \
|
[ ! -z "${KUBELET_CERT:-}" ] && \
|
||||||
[ ! -z "${KUBELET_KEY:-}" ]; then
|
[ ! -z "${KUBELET_KEY:-}" ]; then
|
||||||
KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false --reconcile-cidr=false --pod-cidr=10.123.45.0/29"
|
KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false --pod-cidr=10.123.45.0/29"
|
||||||
else
|
else
|
||||||
KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --pod-cidr=${MASTER_IP_RANGE}"
|
KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --pod-cidr=${MASTER_IP_RANGE}"
|
||||||
fi
|
fi
|
||||||
|
```diff
@@ -26,7 +26,6 @@
 
 {% set debugging_handlers = "--enable-debugging-handlers=true" -%}
 
-{% set reconcile_cidr_args = "" -%}
 {% if grains['roles'][0] == 'kubernetes-master' -%}
   {% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] -%}
 
```
```diff
@@ -35,7 +34,6 @@
 {% if grains.kubelet_api_servers is defined -%}
   {% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
   {% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
-  {% set reconcile_cidr_args = "--reconcile-cidr=false" -%}
 {% else -%}
   {% set api_servers_with_port = "" -%}
 {% endif -%}
```
```diff
@@ -145,9 +143,6 @@
   {% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/" %}
 {% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
   {% set network_plugin = "--network-plugin=kubenet" -%}
-  {% if reconcile_cidr_args == '' -%}
-    {% set reconcile_cidr_args = "--reconcile-cidr=true" -%}
-  {% endif -%}
 {% endif -%}
 
 # Don't pipe the --hairpin-mode flag by default. This allows the kubelet to pick
```
```diff
@@ -194,4 +189,4 @@
 {% endif -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
```
```diff
@@ -224,7 +224,8 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Float64Var(&s.ChaosChance, "chaos-chance", s.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing. [default=0.0]")
 	fs.BoolVar(&s.Containerized, "containerized", s.Containerized, "Experimental support for running kubelet in a container. Intended for testing. [default=false]")
 	fs.Int64Var(&s.MaxOpenFiles, "max-open-files", s.MaxOpenFiles, "Number of files that can be opened by Kubelet process. [default=1000000]")
-	fs.BoolVar(&s.ReconcileCIDR, "reconcile-cidr", s.ReconcileCIDR, "Reconcile node CIDR with the CIDR specified by the API server. Won't have any effect if register-node is false. [default=true]")
+	fs.BoolVar(&s.ReconcileCIDR, "reconcile-cidr", s.ReconcileCIDR, "Reconcile node CIDR with the CIDR specified by the API server. This flag has no function anymore.")
+	fs.MarkDeprecated("reconcile-cidr", "The old --configure-cbr0 networking mode has been removed, so this flag has no function anymore. Will be removed in v1.6.")
 	fs.Var(&s.SystemReserved, "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
 	fs.Var(&s.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
 	fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. Won't have any effect if register-node is false. [default=true]")
```
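This hunk is the heart of the change: the flag stays registered so existing command lines keep parsing, but pflag marks it deprecated. For readers unfamiliar with that mechanism, here is a minimal standalone sketch of the same pattern using the `github.com/spf13/pflag` package seen in the hunk header; the flag-set name and messages are illustrative, not the kubelet's real wiring:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)

	// Keep the flag registered so old invocations that still pass it do not fail to parse.
	reconcileCIDR := fs.Bool("reconcile-cidr", true,
		"Reconcile node CIDR with the CIDR specified by the API server. This flag has no function anymore.")

	// MarkDeprecated hides the flag from --help and makes pflag print a
	// "Flag --reconcile-cidr has been deprecated, ..." notice whenever it is set.
	if err := fs.MarkDeprecated("reconcile-cidr", "this flag has no function anymore and will be removed in a future release"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The value is still parsed, but nothing consumes it anymore.
	fmt.Println("reconcile-cidr parsed as:", *reconcileCIDR)
}
```

Running the sketch with `--reconcile-cidr=false` prints the deprecation notice and then continues normally, which is the behavior this PR wants for clusters that still pass the flag.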
```diff
@@ -450,11 +450,6 @@ function start_kubelet {
       net_plugin_dir_args="--network-plugin-dir=${NET_PLUGIN_DIR}"
     fi
 
-    kubenet_plugin_args=""
-    if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
-      kubenet_plugin_args="--reconcile-cidr=true "
-    fi
-
     container_runtime_endpoint_args=""
     if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
       container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
```
```diff
@@ -484,7 +479,6 @@ function start_kubelet {
       ${dns_args} \
       ${net_plugin_dir_args} \
       ${net_plugin_args} \
-      ${kubenet_plugin_args} \
       ${container_runtime_endpoint_args} \
       ${image_service_endpoint_args} \
       --port="$KUBELET_PORT" >"${KUBELET_LOG}" 2>&1 &
```
```diff
@@ -14,9 +14,7 @@ cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
 cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
 cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
 cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
-cluster/gce/gci/configure-helper.sh: reconcile_cidr="false"
 cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
-cluster/gce/gci/configure-helper.sh: local reconcile_cidr="true"
 cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
 cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
 cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
```
```diff
@@ -122,9 +120,6 @@ test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePat
 test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
 test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
 test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
-test/e2e/cluster_logging_es.go: return fmt.Errorf("No cluster_name field in Elasticsearch response: %v", esResponse)
-test/e2e/cluster_logging_es.go: // Check to see if have a cluster_name field.
-test/e2e/cluster_logging_es.go: clusterName, ok := esResponse["cluster_name"]
 test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
 test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
 test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
```
```diff
@@ -426,7 +426,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		mounter:           kubeDeps.Mounter,
 		writer:            kubeDeps.Writer,
 		nonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR,
-		reconcileCIDR:     kubeCfg.ReconcileCIDR,
 		maxPods:           int(kubeCfg.MaxPods),
 		podsPerCore:       int(kubeCfg.PodsPerCore),
 		nvidiaGPUs:        int(kubeCfg.NvidiaGPUs),
```
```diff
@@ -941,10 +940,6 @@ type Kubelet struct {
 	containerManager cm.ContainerManager
 	nodeConfig cm.NodeConfig
 
-	// Whether or not kubelet should take responsibility for keeping cbr0 in
-	// the correct state.
-	reconcileCIDR bool
-
 	// Traffic to IPs outside this range will use IP masquerade.
 	nonMasqueradeCIDR string
 
```
|
@ -333,9 +333,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||||||
}
|
}
|
||||||
node := &nodes.Items[0]
|
node := &nodes.Items[0]
|
||||||
|
|
||||||
if kl.reconcileCIDR {
|
kl.updatePodCIDR(node.Spec.PodCIDR)
|
||||||
kl.updatePodCIDR(node.Spec.PodCIDR)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := kl.setNodeStatus(node); err != nil {
|
if err := kl.setNodeStatus(node); err != nil {
|
||||||
return err
|
return err
|
||||||
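With the `reconcileCIDR` field gone, `tryUpdateNodeStatus` adopts the pod CIDR from the node object unconditionally. A rough, self-contained sketch of that simplified flow, using hypothetical stand-in types rather than the real `Kubelet` and API structs:

```go
package main

import "fmt"

// nodeSpec stands in for the part of the API server's Node object the kubelet
// reads here (node.Spec.PodCIDR).
type nodeSpec struct {
	PodCIDR string
}

// kubelet is a stripped-down, hypothetical stand-in for the real Kubelet
// struct; after this PR there is no reconcileCIDR gate on it anymore.
type kubelet struct {
	podCIDR string
}

// updatePodCIDR mirrors the idea of kl.updatePodCIDR: adopt whatever CIDR the
// API server assigned to this node, if it changed.
func (kl *kubelet) updatePodCIDR(cidr string) {
	if cidr == "" || cidr == kl.podCIDR {
		return
	}
	fmt.Printf("updating pod CIDR %q -> %q\n", kl.podCIDR, cidr)
	kl.podCIDR = cidr
}

// tryUpdateNodeStatus sketches the post-PR flow: the CIDR is reconciled
// unconditionally on every node status update.
func (kl *kubelet) tryUpdateNodeStatus(node nodeSpec) {
	kl.updatePodCIDR(node.PodCIDR)
	// ... the real code then sets node status and patches the Node object.
}

func main() {
	kl := &kubelet{}
	kl.tryUpdateNodeStatus(nodeSpec{PodCIDR: "10.123.45.0/29"})
	kl.tryUpdateNodeStatus(nodeSpec{PodCIDR: "10.123.45.0/29"}) // no-op: unchanged CIDR
}
```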