Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-16 07:13:53 +00:00)
Merge pull request #47058 from dcbw/remove-dead-kubelet-shaping
Automatic merge from submit-queue (batch tested with PRs 43558, 48261, 42376, 46803, 47058)
kubelet: remove unused bandwidth shaping teardown code
Since v1.5 and the removal of --configure-cbr0:
0800df74ab
"Remove the legacy networking mode --configure-cbr0"
the kubelet hasn't done any shaping operations internally; they have all been
delegated to network plugins like kubenet or to external CNI plugins. Some
shaping code was still left in the kubelet, so remove it now that it is unused.
@freehan @thockin @bprashanth
This commit is contained in:
commit 4b354d4877
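For background on what the removed kubelet code consumed: pods opted into traffic shaping through the kubernetes.io/ingress-bandwidth and kubernetes.io/egress-bandwidth annotations, which the kubelet read via bandwidth.ExtractPodBandwidthResources (see the syncPod and cleanupBandwidthLimits hunks below) and which plugins such as kubenet still honor after this change. The following is a minimal, self-contained Go sketch of that annotation lookup, not the pkg/util/bandwidth implementation; the real helper returns parsed *resource.Quantity values, and the egress key is assumed from the documented annotation name (only the ingress key appears in this diff).

package main

import "fmt"

// Annotation keys a pod used to request traffic shaping. The ingress key also
// appears in TestCleanupBandwidthLimits below; the egress key is assumed.
// Illustrative sketch only.
const (
	ingressBandwidthAnnotation = "kubernetes.io/ingress-bandwidth"
	egressBandwidthAnnotation  = "kubernetes.io/egress-bandwidth"
)

// extractPodBandwidth mirrors the shape of bandwidth.ExtractPodBandwidthResources
// used in the hunks below, but returns the raw annotation values instead of
// parsed resource.Quantity limits.
func extractPodBandwidth(annotations map[string]string) (ingress, egress string) {
	return annotations[ingressBandwidthAnnotation], annotations[egressBandwidthAnnotation]
}

func main() {
	podAnnotations := map[string]string{
		ingressBandwidthAnnotation: "10M",
	}
	ingress, egress := extractPodBandwidth(podAnnotations)
	// After this change it is the network plugin (e.g. kubenet), not the
	// kubelet, that turns these values into shaping rules on the pod's /32.
	fmt.Printf("ingress=%q egress=%q\n", ingress, egress)
}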
@@ -96,7 +96,6 @@ go_library(
         "//pkg/security/apparmor:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//pkg/util:go_default_library",
-        "//pkg/util/bandwidth:go_default_library",
         "//pkg/util/dbus:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/io:go_default_library",
@@ -201,7 +200,6 @@ go_test(
         "//pkg/kubelet/util/queue:go_default_library",
         "//pkg/kubelet/util/sliceutils:go_default_library",
         "//pkg/kubelet/volumemanager:go_default_library",
-        "//pkg/util/bandwidth:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/version:go_default_library",
         "//pkg/volume:go_default_library",
@@ -102,7 +102,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/security/apparmor"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 	utildbus "k8s.io/kubernetes/pkg/util/dbus"
 	utilexec "k8s.io/kubernetes/pkg/util/exec"
 	kubeio "k8s.io/kubernetes/pkg/util/io"
@@ -1027,10 +1026,6 @@ type Kubelet struct {
 	// clusterDomain and clusterDNS.
 	resolverConfig string

-	// Optionally shape the bandwidth of a pod
-	// TODO: remove when kubenet plugin is ready
-	shaper bandwidth.BandwidthShaper
-
 	// Information about the ports which are opened by daemons on Node running this Kubelet server.
 	daemonEndpoints *v1.NodeDaemonEndpoints

@@ -1632,28 +1627,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		return err
 	}

-	// early successful exit if pod is not bandwidth-constrained
-	if !kl.shapingEnabled() {
-		return nil
-	}
-
-	// Update the traffic shaping for the pod's ingress and egress limits
-	ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
-	if err != nil {
-		return err
-	}
-	if egress != nil || ingress != nil {
-		if kubecontainer.IsHostNetworkPod(pod) {
-			kl.recorder.Event(pod, v1.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
-		} else if kl.shaper != nil {
-			if len(apiPodStatus.PodIP) > 0 {
-				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress)
-			}
-		} else {
-			kl.recorder.Event(pod, v1.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
-		}
-	}
-
 	return nil
 }

@@ -25,10 +25,8 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/kubelet/network"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
 )

@@ -244,51 +242,6 @@ func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, sear
 	return nameservers, searches, nil
 }

-// cleanupBandwidthLimits updates the status of bandwidth-limited containers
-// and ensures that only the appropriate CIDRs are active on the node.
-func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error {
-	if kl.shaper == nil {
-		return nil
-	}
-	currentCIDRs, err := kl.shaper.GetCIDRs()
-	if err != nil {
-		return err
-	}
-	possibleCIDRs := sets.String{}
-	for ix := range allPods {
-		pod := allPods[ix]
-		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
-		if err != nil {
-			return err
-		}
-		if ingress == nil && egress == nil {
-			glog.V(8).Infof("Not a bandwidth limited container...")
-			continue
-		}
-		status, found := kl.statusManager.GetPodStatus(pod.UID)
-		if !found {
-			// TODO(random-liu): Cleanup status get functions. (issue #20477)
-			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
-			if err != nil {
-				return err
-			}
-			status = kl.generateAPIPodStatus(pod, s)
-		}
-		if status.Phase == v1.PodRunning {
-			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
-		}
-	}
-	for _, cidr := range currentCIDRs {
-		if !possibleCIDRs.Has(cidr) {
-			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
-			if err := kl.shaper.Reset(cidr); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
 // syncNetworkStatus updates the network state
 func (kl *Kubelet) syncNetworkStatus() {
 	// For cri integration, network state will be updated in updateRuntimeUp,
@@ -327,25 +280,6 @@ func (kl *Kubelet) updatePodCIDR(cidr string) {
 	kl.runtimeState.setPodCIDR(cidr)
 }

-// shapingEnabled returns whether traffic shaping is enabled.
-func (kl *Kubelet) shapingEnabled() bool {
-	// Disable shaping if a network plugin is defined and supports shaping
-	if kl.networkPlugin != nil && kl.networkPlugin.Capabilities().Has(network.NET_PLUGIN_CAPABILITY_SHAPING) {
-		return false
-	}
-	// This is not strictly true but we need to figure out how to handle
-	// bandwidth shaping anyway. If the kubelet doesn't have a networkPlugin,
-	// it could mean:
-	// a. the kubelet is responsible for bandwidth shaping
-	// b. the kubelet is using cri, and the cri has a network plugin
-	// Today, the only plugin that understands bandwidth shaping is kubenet, and
-	// it doesn't support bandwidth shaping when invoked through cri, so it
-	// effectively boils down to letting the kubelet decide how to handle
-	// shaping annotations. The combination of (cri + network plugin that
-	// handles bandwidth shaping) may not work because of this.
-	return true
-}
-
 // syncNetworkUtil ensures the network utility are present on host.
 // Network util includes:
 // 1. In nat table, KUBE-MARK-DROP rule to mark connections for dropping
@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 )

 func TestNodeIPParam(t *testing.T) {
@@ -184,85 +183,6 @@ func TestComposeDNSSearch(t *testing.T) {
 	}
 }

-func TestCleanupBandwidthLimits(t *testing.T) {
-	testPod := func(name, ingress string) *v1.Pod {
-		pod := podWithUidNameNs("", name, "")
-
-		if len(ingress) != 0 {
-			pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
-		}
-
-		return pod
-	}
-
-	// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
-	// layer status getter function and test that function instead.
-	tests := []struct {
-		status           *v1.PodStatus
-		pods             []*v1.Pod
-		inputCIDRs       []string
-		expectResetCIDRs []string
-		name             string
-	}{
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodRunning,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod running",
-		},
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodFailed,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod not running",
-		},
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodFailed,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", ""),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "no bandwidth limits",
-		},
-	}
-	for _, test := range tests {
-		shaper := &bandwidth.FakeShaper{
-			CIDRs: test.inputCIDRs,
-		}
-
-		testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-		defer testKube.Cleanup()
-		testKube.kubelet.shaper = shaper
-
-		for _, pod := range test.pods {
-			testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
-		}
-
-		err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
-		assert.NoError(t, err, "test [%s]", test.name)
-		assert.EqualValues(t, test.expectResetCIDRs, shaper.ResetCIDRs, "test[%s]", test.name)
-	}
-}
-
 func TestGetIPTablesMark(t *testing.T) {
 	tests := []struct {
 		bit int
@@ -893,12 +893,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	// Remove any orphaned mirror pods.
 	kl.podManager.DeleteOrphanedMirrorPods()

-	// Clear out any old bandwidth rules
-	err = kl.cleanupBandwidthLimits(allPods)
-	if err != nil {
-		glog.Errorf("Failed cleaning up bandwidth limits: %v", err)
-	}
-
 	// Remove any cgroups in the hierarchy for pods that are no longer running.
 	if kl.cgroupsPerQOS {
 		kl.cleanupOrphanedPodCgroups(cgroupPods, activePods)
@@ -304,7 +304,7 @@ func (plugin *kubenetNetworkPlugin) Name() string {
 }

 func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int {
-	return utilsets.NewInt(network.NET_PLUGIN_CAPABILITY_SHAPING)
+	return utilsets.NewInt()
 }

 // setup sets up networking through CNI using the given ns/name and sandbox ID.
@@ -125,7 +125,7 @@ func TestGetPodNetworkStatus(t *testing.T) {
 	}
 }

-// TestTeardownBeforeSetUp tests that a `TearDown` call does call
+// TestTeardownCallsShaper tests that a `TearDown` call does call
 // `shaper.Reset`
 func TestTeardownCallsShaper(t *testing.T) {
 	fexec := &exec.FakeExec{
@@ -43,12 +43,6 @@ const DefaultPluginName = "kubernetes.io/no-op"
 const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change"
 const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr"

-// Plugin capabilities
-const (
-	// Indicates the plugin handles Kubernetes bandwidth shaping annotations internally
-	NET_PLUGIN_CAPABILITY_SHAPING int = 1
-)
-
 // Plugin is an interface to network plugins for the kubelet
 type NetworkPlugin interface {
 	// Init initializes the plugin. This will be called exactly once
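For context on the two kubenet hunks above: Capabilities() is how a plugin told the kubelet it would handle shaping itself, and shapingEnabled() (removed earlier in this diff) was the kubelet-side check; with that check gone, the capability constant and kubenet's advertisement of it become dead code. The sketch below illustrates that contract using dependency-free stand-ins (intSet, netPluginCapabilityShaping, plugin are hypothetical names), not the real utilsets.Int or NetworkPlugin types from k8s.io/apimachinery/pkg/util/sets and pkg/kubelet/network.

package main

import "fmt"

// intSet is a stand-in for utilsets.Int (illustrative only).
type intSet map[int]struct{}

func newIntSet(items ...int) intSet {
	s := intSet{}
	for _, i := range items {
		s[i] = struct{}{}
	}
	return s
}

func (s intSet) Has(i int) bool { _, ok := s[i]; return ok }

// Stand-in for the removed NET_PLUGIN_CAPABILITY_SHAPING constant.
const netPluginCapabilityShaping = 1

// plugin mimics the NetworkPlugin.Capabilities() contract: a plugin advertises
// the features it handles itself, and the kubelet checked the set before
// falling back to its own (now removed) shaping path.
type plugin struct{ caps intSet }

func (p plugin) Capabilities() intSet { return p.caps }

func main() {
	kubenetBefore := plugin{caps: newIntSet(netPluginCapabilityShaping)}
	kubenetAfter := plugin{caps: newIntSet()}

	for _, p := range []plugin{kubenetBefore, kubenetAfter} {
		fmt.Println("plugin handles shaping:", p.Capabilities().Has(netPluginCapabilityShaping))
	}
}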