Mirror of https://github.com/k3s-io/kubernetes.git
Revert "Put the container bridge in promiscuous mode."
commit a8b943dddd
parent 9a12c0a5ea
@@ -116,8 +116,8 @@ OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
 OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
 OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
 
-# How should the kubelet configure hairpin mode?
-HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
+# Should the kubelet configure hairpin mode on the bridge?
+HAIRPIN_MODE="${HAIRPIN_MODE:-true}" # true, false
 
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
 E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
@@ -135,8 +135,8 @@ OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
 OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
 OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
 
-# How should the kubelet configure hairpin mode?
-HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
+# Should the kubelet configure hairpin mode on the bridge?
+HAIRPIN_MODE="${HAIRPIN_MODE:-true}" # true, false
 
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
 E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
@@ -107,12 +107,12 @@
 {% set system_container = "" -%}
 {% set kubelet_container = "" -%}
 {% set runtime_container = "" -%}
 {% if grains['os_family'] == 'Debian' -%}
 {% if pillar.get('is_systemd') %}
 {% set cgroup_root = "--cgroup-root=docker" -%}
 {% else %}
 {% set cgroup_root = "--cgroup-root=/" -%}
 {% set system_container = "--system-cgroups=/system" -%}
 {% set runtime_container = "--runtime-cgroups=/docker-daemon" -%}
 {% set kubelet_container= "--kubelet-cgroups=/kubelet" -%}
 {% endif %}
@@ -151,11 +151,12 @@
 {% endif -%}
 {% endif -%}
 
-{% set hairpin_mode = "" -%}
-{% if pillar['hairpin_mode'] is defined -%}
-{% set hairpin_mode = "--hairpin-mode=" + pillar['hairpin_mode'] -%}
+{% set configure_hairpin_mode = "--configure-hairpin-mode=true" -%}
+{% if pillar.get('hairpin_mode', '').lower() == 'false' %}
+{% set configure_hairpin_mode = "--configure-hairpin-mode=false" %}
 {% endif -%}
 
+
 {% set kubelet_port = "" -%}
 {% if pillar['kubelet_port'] is defined -%}
 {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
@@ -172,4 +173,4 @@
 {% endif -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ configure_hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{test_args}}"
@@ -35,6 +35,7 @@ import (
 const (
 defaultRootDir = "/var/lib/kubelet"
 experimentalFlannelOverlay = false
+hairpinMode = true
 )
 
 // KubeletServer encapsulates all of the parameters necessary for starting up
@@ -125,7 +126,7 @@ func NewKubeletServer() *KubeletServer {
 KubeAPIBurst: 10,
 ExperimentalFlannelOverlay: experimentalFlannelOverlay,
 OutOfDiskTransitionFrequency: unversioned.Duration{5 * time.Minute},
-HairpinMode: componentconfig.PromiscuousBridge,
+HairpinMode: hairpinMode,
 },
 }
 }
@@ -207,7 +208,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'")
 fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used")
 fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
-fs.StringVar(&s.HairpinMode, "hairpin-mode", s.HairpinMode, "How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".")
+fs.BoolVar(&s.HairpinMode, "configure-hairpin-mode", s.HairpinMode, "If true, kubelet will set the hairpin mode flag on container interfaces. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service.")
 fs.IntVar(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.")
 fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
 fs.StringVar(&s.NonMasqueradeCIDR, "non-masquerade-cidr", s.NonMasqueradeCIDR, "Traffic to IPs outside this range will use IP masquerade.")
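The hunks above replace the three-valued --hairpin-mode string flag with the boolean --configure-hairpin-mode flag that existed before the reverted commit, and the default changes from componentconfig.PromiscuousBridge to the hairpinMode constant (true). The snippet below only illustrates the user-visible difference between the two flag shapes; it uses the standard library flag package rather than the kubelet's pflag setup, and the flag-set names are made up for the sketch.

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Before the revert: a string flag with three accepted values.
	oldFS := flag.NewFlagSet("kubelet-old", flag.ContinueOnError)
	hairpinMode := oldFS.String("hairpin-mode", "promiscuous-bridge",
		"promiscuous-bridge, hairpin-veth, or none")
	_ = oldFS.Parse([]string{"--hairpin-mode=hairpin-veth"})

	// After the revert: a plain boolean, defaulting to true.
	newFS := flag.NewFlagSet("kubelet-new", flag.ContinueOnError)
	configureHairpin := newFS.Bool("configure-hairpin-mode", true,
		"set the hairpin mode flag on container interfaces")
	_ = newFS.Parse([]string{"--configure-hairpin-mode=false"})

	fmt.Println(*hairpinMode, *configureHairpin) // hairpin-veth false
}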
@@ -752,7 +752,7 @@ type KubeletConfig struct {
 ExperimentalFlannelOverlay bool
 NodeIP net.IP
 ContainerRuntimeOptions []kubecontainer.Option
-HairpinMode string
+HairpinMode bool
 }
 
 func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -77,6 +77,7 @@ kubelet
 --cluster-domain="": Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains
 --config="": Path to the config file or directory of files
 --configure-cbr0[=false]: If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.
+--configure-hairpin-mode[=true]: If true, kubelet will set the hairpin mode flag on container interfaces. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service.
 --container-runtime="docker": The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.
 --containerized[=false]: Experimental support for running kubelet in a container. Intended for testing. [default=false]
 --cpu-cfs-quota[=true]: Enable CPU CFS quota enforcement for containers that specify CPU limits
@@ -90,7 +91,6 @@ kubelet
 --experimental-flannel-overlay[=false]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
 --file-check-frequency=20s: Duration between checking config files for new data
 --google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
---hairpin-mode="promiscuous-bridge": How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are "promiscuous-bridge", "hairpin-veth" and "none".
 --healthz-bind-address=127.0.0.1: The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
 --healthz-port=10248: The port of the localhost healthz endpoint
 --host-ipc-sources="*": Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace. [default="*"]
@@ -152,7 +152,7 @@ kubelet
 --volume-stats-agg-period=1m0s: Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. Default: '1m'
 ```
 
-###### Auto generated by spf13/cobra on 18-Feb-2016
+###### Auto generated by spf13/cobra on 17-Feb-2016
 
 
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@@ -96,6 +96,7 @@
 branch: 'master'
 provider-env: '{gce-provider-env}'
 job-env: |
+export HAIRPIN_MODE="false"
 export PROJECT="k8s-jkns-gce-soak"
 - 'gce-2':
 deploy-description: Clone of kubernetes-soak-weekly-deploy-gce.
@@ -103,7 +104,6 @@
 branch: 'master'
 provider-env: '{gce-provider-env}'
 job-env: |
-export HAIRPIN_MODE="hairpin-veth"
 export PROJECT="k8s-jkns-gce-soak-2"
 - 'gce-1.1':
 deploy-description: |
@@ -108,4 +108,3 @@ test/e2e/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
 test/images/mount-tester/mt.go: flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
 test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
 test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")
-pkg/kubelet/network/hairpin/hairpin.go: hairpinModeRelativePath = "hairpin_mode"
@@ -389,5 +389,5 @@ leader-elect-lease-duration
 leader-elect-renew-deadline
 leader-elect-retry-period
 watch-cache-sizes
+configure-hairpin-mode
 lock-file
-hairpin-mode
@@ -1094,32 +1094,6 @@ func (x *ProxyMode) CodecDecodeSelf(d *codec1978.Decoder) {
 }
 }
 
-func (x HairpinMode) CodecEncodeSelf(e *codec1978.Encoder) {
-var h codecSelfer1234
-z, r := codec1978.GenHelperEncoder(e)
-_, _, _ = h, z, r
-yym1 := z.EncBinary()
-_ = yym1
-if false {
-} else if z.HasExtensions() && z.EncExt(x) {
-} else {
-r.EncodeString(codecSelferC_UTF81234, string(x))
-}
-}
-
-func (x *HairpinMode) CodecDecodeSelf(d *codec1978.Decoder) {
-var h codecSelfer1234
-z, r := codec1978.GenHelperDecoder(d)
-_, _, _ = h, z, r
-yym1 := z.DecBinary()
-_ = yym1
-if false {
-} else if z.HasExtensions() && z.DecExt(x) {
-} else {
-*((*string)(x)) = r.DecodeString()
-}
-}
-
 func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 var h codecSelfer1234
 z, r := codec1978.GenHelperEncoder(e)
@@ -2380,17 +2354,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 _ = yym194
 if false {
 } else {
-r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode))
+r.EncodeBool(bool(x.HairpinMode))
 }
 } else {
 z.EncSendContainerState(codecSelfer_containerMapKey1234)
-r.EncodeString(codecSelferC_UTF81234, string("hairpinMode"))
+r.EncodeString(codecSelferC_UTF81234, string("configureHairpinMode"))
 z.EncSendContainerState(codecSelfer_containerMapValue1234)
 yym195 := z.EncBinary()
 _ = yym195
 if false {
 } else {
-r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode))
+r.EncodeBool(bool(x.HairpinMode))
 }
 }
 if yyr2 || yy2arr2 {
@@ -3244,11 +3218,11 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode
 } else {
 x.ConfigureCBR0 = bool(r.DecodeBool())
 }
-case "hairpinMode":
+case "configureHairpinMode":
 if r.TryDecodeAsNil() {
-x.HairpinMode = ""
+x.HairpinMode = false
 } else {
-x.HairpinMode = string(r.DecodeString())
+x.HairpinMode = bool(r.DecodeBool())
 }
 case "maxPods":
 if r.TryDecodeAsNil() {
@@ -4399,9 +4373,9 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco
 }
 z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 if r.TryDecodeAsNil() {
-x.HairpinMode = ""
+x.HairpinMode = false
 } else {
-x.HairpinMode = string(r.DecodeString())
+x.HairpinMode = bool(r.DecodeBool())
 }
 yyj91++
 if yyhl91 {
@@ -78,24 +78,6 @@ const (
 ProxyModeIPTables ProxyMode = "iptables"
 )
 
-// HairpinMode denotes how the kubelet should configure networking to handle
-// hairpin packets.
-type HairpinMode string
-
-// Enum settings for different ways to handle hairpin packets.
-const (
-// Set the hairpin flag on the veth of containers in the respective
-// container runtime.
-HairpinVeth = "hairpin-veth"
-// Make the container bridge promiscuous. This will force it to accept
-// hairpin packets, even if the flag isn't set on ports of the bridge.
-PromiscuousBridge = "promiscuous-bridge"
-// Neither of the above. If the kubelet is started in this hairpin mode
-// and kube-proxy is running in iptables mode, hairpin packets will be
-// dropped by the container bridge.
-HairpinNone = "none"
-)
-
 // TODO: curate the ordering and structure of this config object
 type KubeletConfiguration struct {
 // config is the path to the config file or directory of files
@@ -270,16 +252,11 @@ type KubeletConfiguration struct {
 // configureCBR0 enables the kublet to configure cbr0 based on
 // Node.Spec.PodCIDR.
 ConfigureCBR0 bool `json:"configureCbr0"`
-// How should the kubelet configure the container bridge for hairpin packets.
-// Setting this flag allows endpoints in a Service to loadbalance back to
-// themselves if they should try to access their own Service. Values:
-// "promiscuous-bridge": make the container bridge promiscuous.
-// "hairpin-veth": set the hairpin flag on container veth interfaces.
-// "none": do nothing.
-// Setting --configure-cbr0 to false implies that to achieve hairpin NAT
-// one must set --hairpin-mode=veth-flag, because bridge assumes the
-// existence of a container bridge named cbr0.
-HairpinMode string `json:"hairpinMode"`
+// Should the kubelet set the hairpin flag on veth interfaces for containers
+// it creates? Setting this flag allows endpoints in a Service to
+// loadbalance back to themselves if they should try to access their own
+// Service.
+HairpinMode bool `json:"configureHairpinMode"`
 // maxPods is the number of pods that can run on this Kubelet.
 MaxPods int `json:"maxPods"`
 // dockerExecHandlerName is the handler to use when executing a command
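The first hunk above deletes the HairpinMode string type and its three constants; after the revert the field is a plain bool again, so there is nothing left to validate. For comparison, here is a small, self-contained sketch (not kubelet code) of the validation burden the string enum carried; the validate helper and the sample values are invented for illustration, and the constants are typed here even though the deleted originals were untyped strings.

package main

import "fmt"

// HairpinMode loosely mirrors the string type removed by this revert.
type HairpinMode string

const (
	HairpinVeth       HairpinMode = "hairpin-veth"
	PromiscuousBridge HairpinMode = "promiscuous-bridge"
	HairpinNone       HairpinMode = "none"
)

// validate is a hypothetical helper showing how a three-valued flag has to
// be checked, whereas a bool flag needs no validation at all.
func validate(m HairpinMode) error {
	switch m {
	case HairpinVeth, PromiscuousBridge, HairpinNone:
		return nil
	}
	return fmt.Errorf("unknown hairpin mode %q", m)
}

func main() {
	for _, m := range []HairpinMode{"promiscuous-bridge", "hairpin-veth", "bogus"} {
		fmt.Println(m, validate(m))
	}
}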
@@ -59,7 +59,7 @@ func createCBR0(wantCIDR *net.IPNet) error {
 return nil
 }
 
-func ensureCbr0(wantCIDR *net.IPNet, promiscuous bool) error {
+func ensureCbr0(wantCIDR *net.IPNet) error {
 exists, err := cbr0Exists()
 if err != nil {
 return err
@@ -80,20 +80,7 @@ func ensureCbr0(wantCIDR *net.IPNet, promiscuous bool) error {
 glog.Error(err)
 return err
 }
-if err := createCBR0(wantCIDR); err != nil {
-glog.Error(err)
-return err
-}
-}
-// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
-// TODO: Remove this once the kernel bug (#20096) is fixed.
-if promiscuous {
-// Checking if the bridge is in promiscuous mode is as expensive and more brittle than
-// simply setting the flag everytime.
-if err := exec.Command("ip", "link", "set", "cbr0", "promisc", "on").Run(); err != nil {
-glog.Error(err)
-return err
-}
-}
+return createCBR0(wantCIDR)
 }
 return nil
 }
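The ensureCbr0 hunks above are the heart of the revert: the removed code forced the cbr0 bridge into promiscuous mode by shelling out to ip link set cbr0 promisc on, while the restored behavior relies on setting the per-port hairpin flag on each container's veth (the hairpin_mode sysfs attribute that the exceptions-file hunk earlier references via pkg/kubelet/network/hairpin/hairpin.go). The sketch below shows that per-port mechanism in isolation; it is not the kubelet's hairpin package, the bridge and interface names are placeholders, and it assumes the standard Linux bridge sysfs layout.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// setHairpin enables hairpin ("reflective relay") on one bridge port by
// writing "1" to /sys/class/net/<bridge>/brif/<port>/hairpin_mode, so the
// bridge will forward a frame back out the port it arrived on. This is the
// per-interface alternative to making the whole bridge promiscuous.
func setHairpin(bridge, port string) error {
	path := filepath.Join("/sys/class/net", bridge, "brif", port, "hairpin_mode")
	return os.WriteFile(path, []byte("1"), 0644)
}

func main() {
	// "cbr0" and "veth1234abcd" are placeholder names for this sketch.
	if err := setHairpin("cbr0", "veth1234abcd"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}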
@@ -40,7 +40,6 @@ import (
 "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/api/validation"
-"k8s.io/kubernetes/pkg/apis/componentconfig"
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 "k8s.io/kubernetes/pkg/client/record"
@@ -205,7 +204,7 @@ func NewMainKubelet(
 enableCustomMetrics bool,
 volumeStatsAggPeriod time.Duration,
 containerRuntimeOptions []kubecontainer.Option,
-hairpinMode string,
+hairpinMode bool,
 ) (*Kubelet, error) {
 if rootDirectory == "" {
 return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
@@ -327,7 +326,6 @@ func NewMainKubelet(
 outOfDiskTransitionFrequency: outOfDiskTransitionFrequency,
 reservation: reservation,
 enableCustomMetrics: enableCustomMetrics,
-hairpinMode: componentconfig.HairpinMode(hairpinMode),
 }
 // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 klet.resourceAnalyzer = stats.NewResourceAnalyzer(klet, volumeStatsAggPeriod)
@@ -385,7 +383,7 @@ func NewMainKubelet(
 imageBackOff,
 serializeImagePulls,
 enableCustomMetrics,
-hairpinMode == componentconfig.HairpinVeth,
+hairpinMode,
 containerRuntimeOptions...,
 )
 case "rkt":
@@ -685,11 +683,6 @@ type Kubelet struct {
 
 // support gathering custom metrics.
 enableCustomMetrics bool
-
-// How the Kubelet should setup hairpin NAT. Can take the values: "promiscuous-bridge"
-// (make cbr0 promiscuous), "hairpin-veth" (set the hairpin flag on veth interfaces)
-// or "none" (do nothing).
-hairpinMode componentconfig.HairpinMode
 }
 
 // Validate given node IP belongs to the current host
@@ -2640,7 +2633,7 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
 }
 // Set cbr0 interface address to first address in IPNet
 cidr.IP.To4()[3] += 1
-if err := ensureCbr0(cidr, kl.hairpinMode == componentconfig.PromiscuousBridge); err != nil {
+if err := ensureCbr0(cidr); err != nil {
 return err
 }
 if kl.shaper == nil {