Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-09-26 12:46:06 +00:00
Revert "Merge pull request #92312 from Sh4d1/kep_1860"
This reverts commit ef16faf409, reversing changes made to 2343b8a68b.
@@ -39,7 +39,6 @@ go_test(
     srcs = ["proxier_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/features:go_default_library",
         "//pkg/proxy:go_default_library",
         "//pkg/proxy/healthcheck:go_default_library",
         "//pkg/proxy/util:go_default_library",
@@ -54,8 +53,6 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
@@ -782,10 +782,10 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint)
 				klog.Errorf("Failed to delete %s endpoint connections for externalIP %s, error: %v", epSvcPair.ServicePortName.String(), extIP, err)
 			}
 		}
-		for _, lbIngress := range svcInfo.LoadBalancerIngress() {
-			err := conntrack.ClearEntriesForNAT(proxier.exec, lbIngress.IP, endpointIP, svcProto)
+		for _, lbIP := range svcInfo.LoadBalancerIPStrings() {
+			err := conntrack.ClearEntriesForNAT(proxier.exec, lbIP, endpointIP, svcProto)
 			if err != nil {
-				klog.Errorf("Failed to delete %s endpoint connections for LoabBalancerIP %s, error: %v", epSvcPair.ServicePortName.String(), lbIngress.IP, err)
+				klog.Errorf("Failed to delete %s endpoint connections for LoabBalancerIP %s, error: %v", epSvcPair.ServicePortName.String(), lbIP, err)
 			}
 		}
 	}
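For context, the hunk above swaps the richer LoadBalancerIngress() accessor (which exposed the per-ingress IPMode added for KEP-1860) back to the plain string-based helper. The following is a minimal sketch, assuming a service-info type that caches v1.LoadBalancerStatus, of what a LoadBalancerIPStrings()-style accessor looks like; the type and field names here are illustrative, not the exact pkg/proxy internals:

package proxysketch

import v1 "k8s.io/api/core/v1"

// baseServiceInfo is a stand-in for the proxy package's cached service info.
type baseServiceInfo struct {
	loadBalancerStatus v1.LoadBalancerStatus
}

// LoadBalancerIPStrings returns only the ingress IPs as strings, discarding
// any per-ingress metadata such as the IPMode field introduced by KEP-1860.
func (info *baseServiceInfo) LoadBalancerIPStrings() []string {
	ips := make([]string, 0, len(info.loadBalancerStatus.Ingress))
	for _, ing := range info.loadBalancerStatus.Ingress {
		ips = append(ips, ing.IP)
	}
	return ips
}

With only strings available, the conntrack cleanup loop above can no longer consult IPMode, which is exactly what the revert restores.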
@@ -1161,8 +1161,8 @@ func (proxier *Proxier) syncProxyRules() {
 
 		// Capture load-balancer ingress.
 		fwChain := svcInfo.serviceFirewallChainName
-		for _, ingress := range svcInfo.LoadBalancerIngress() {
-			if ingress.IP != "" {
+		for _, ingress := range svcInfo.LoadBalancerIPStrings() {
+			if ingress != "" {
 				if hasEndpoints {
 					// create service firewall chain
 					if chain, ok := existingNATChains[fwChain]; ok {
@@ -1175,17 +1175,15 @@ func (proxier *Proxier) syncProxyRules() {
 				// This currently works for loadbalancers that preserves source ips.
 				// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
 
-				if !utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) || *ingress.IPMode == v1.LoadBalancerIPModeVIP {
-					args = append(args[:0],
-						"-A", string(kubeServicesChain),
-						"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
-						"-m", protocol, "-p", protocol,
-						"-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)),
-						"--dport", strconv.Itoa(svcInfo.Port()),
-					)
-					// jump to service firewall chain
-					writeLine(proxier.natRules, append(args, "-j", string(fwChain))...)
-				}
+				args = append(args[:0],
+					"-A", string(kubeServicesChain),
+					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
+					"-m", protocol, "-p", protocol,
+					"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
+					"--dport", strconv.Itoa(svcInfo.Port()),
+				)
+				// jump to service firewall chain
+				writeLine(proxier.natRules, append(args, "-j", string(fwChain))...)
 
 				args = append(args[:0],
 					"-A", string(fwChain),
@@ -1220,7 +1218,7 @@ func (proxier *Proxier) syncProxyRules() {
 					// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
 					// Need to add the following rule to allow request on host.
 					if allowFromNode {
-						writeLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), "-j", string(chosenChain))...)
+						writeLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress)), "-j", string(chosenChain))...)
 					}
 				}
 
@@ -1233,7 +1231,7 @@ func (proxier *Proxier) syncProxyRules() {
 					"-A", string(kubeExternalServicesChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
 					"-m", protocol, "-p", protocol,
-					"-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)),
+					"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
 					"--dport", strconv.Itoa(svcInfo.Port()),
 					"-j", "REJECT",
 				)
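To make the rule construction in the hunks above easier to follow, here is a rough, self-contained sketch of how an args slice of that shape becomes a single iptables-restore line once the jump target is appended. The chain name KUBE-FW-EXAMPLE, the service name, the IP, and the local writeLine helper are all made up for illustration; only the overall shape mirrors the proxier code in the diff:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// writeLine mimics the proxier's helper: join the words with spaces and
// append a newline to the buffer that is later fed to iptables-restore.
func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	var natRules bytes.Buffer

	// Hypothetical stand-ins for svcNameString, protocol, the load-balancer
	// ingress IP (already in CIDR form) and the service port.
	svcNameString := "ns1/svc1:p80"
	protocol := "tcp"
	lbIP := "1.2.3.4/32"
	port := 80

	args := []string{
		"-A", "KUBE-SERVICES",
		"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
		"-m", protocol, "-p", protocol,
		"-d", lbIP,
		"--dport", fmt.Sprintf("%d", port),
	}
	// Jump to the (hypothetical) per-service firewall chain.
	writeLine(&natRules, append(args, "-j", "KUBE-FW-EXAMPLE")...)

	// Prints:
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-EXAMPLE
	fmt.Print(natRules.String())
}

After the revert, this rule is written unconditionally for every non-empty load-balancer ingress IP; the removed code only wrote it when the LoadBalancerIPMode gate was off or the ingress IPMode was VIP.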
@@ -26,7 +26,6 @@ import (
 	"testing"
 	"time"
 
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 
 	"github.com/stretchr/testify/assert"
@@ -35,7 +34,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
 	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
@@ -48,8 +46,6 @@ import (
 	"k8s.io/utils/exec"
 	fakeexec "k8s.io/utils/exec/testing"
 	utilpointer "k8s.io/utils/pointer"
-
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 )
 
 func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
@@ -2686,85 +2682,4 @@ COMMIT
 	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
 }
 
-func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, true)()
-	ipModeProxy := v1.LoadBalancerIPModeProxy
-	ipModeVIP := v1.LoadBalancerIPModeVIP
-
-	testCases := []struct {
-		svcIP        string
-		svcLBIP      string
-		ipMode       *v1.LoadBalancerIPMode
-		expectedRule bool
-	}{
-		{
-			svcIP:        "10.20.30.41",
-			svcLBIP:      "1.2.3.4",
-			ipMode:       &ipModeProxy,
-			expectedRule: false,
-		},
-		{
-			svcIP:        "10.20.30.42",
-			svcLBIP:      "1.2.3.5",
-			ipMode:       &ipModeVIP,
-			expectedRule: true,
-		},
-	}
-
-	svcPort := 80
-	svcNodePort := 3001
-	svcPortName := proxy.ServicePortName{
-		NamespacedName: makeNSN("ns1", "svc1"),
-		Port:           "p80",
-		Protocol:       v1.ProtocolTCP,
-	}
-
-	for _, testCase := range testCases {
-		ipt := iptablestest.NewFake()
-		fp := NewFakeProxier(ipt, false)
-		makeServiceMap(fp,
-			makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
-				svc.Spec.Type = "LoadBalancer"
-				svc.Spec.ClusterIP = testCase.svcIP
-				svc.Spec.Ports = []v1.ServicePort{{
-					Name:     svcPortName.Port,
-					Port:     int32(svcPort),
-					Protocol: v1.ProtocolTCP,
-					NodePort: int32(svcNodePort),
-				}}
-				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
-					IP:     testCase.svcLBIP,
-					IPMode: testCase.ipMode,
-				}}
-			}),
-		)
-
-		epIP := "10.180.0.1"
-		makeEndpointsMap(fp,
-			makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *v1.Endpoints) {
-				ept.Subsets = []v1.EndpointSubset{{
-					Addresses: []v1.EndpointAddress{{
-						IP: epIP,
-					}},
-					Ports: []v1.EndpointPort{{
-						Name:     svcPortName.Port,
-						Port:     int32(svcPort),
-						Protocol: v1.ProtocolTCP,
-					}},
-				}}
-			}),
-		)
-
-		fp.syncProxyRules()
-
-		proto := strings.ToLower(string(v1.ProtocolTCP))
-		fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
-
-		kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
-		if hasJump(kubeSvcRules, fwChain, testCase.svcLBIP, svcPort) != testCase.expectedRule {
-			errorf(fmt.Sprintf("Found jump to firewall chain %v", fwChain), kubeSvcRules, t)
-		}
-	}
-}
-
 // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.