Merge pull request #37720 from freehan/lb-src-update

Automatic merge from submit-queue

Fix Service Update on LoadBalancerSourceRanges Field

Fixes: https://github.com/kubernetes/kubernetes/issues/33033
Also expands: https://github.com/kubernetes/kubernetes/pull/32748
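
In practice this means spec.loadBalancerSourceRanges can now be edited on a live LoadBalancer Service instead of recreating the Service. A minimal sketch of such an update, mirroring the cs.Core() client calls used in the e2e test below; the function name, clientset type, imports, namespace, name, and CIDR here are illustrative assumptions, not part of this PR:

// Sketch only: the kind of update that used to be rejected while the Service
// stayed type=LoadBalancer and is accepted after this change.
func updateSourceRanges(cs clientset.Interface, namespace, name string, ranges []string) error {
    svc, err := cs.Core().Services(namespace).Get(name)
    if err != nil {
        return err
    }
    svc.Spec.LoadBalancerSourceRanges = ranges // e.g. []string{"10.180.0.0/16"}
    _, err = cs.Core().Services(namespace).Update(svc)
    return err
}
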
Kubernetes Submit Queue authored 2016-12-01 18:21:39 -08:00, committed by GitHub
commit 6abb472357
6 changed files with 98 additions and 11 deletions


@@ -2667,15 +2667,6 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
}
}
// TODO(freehan): allow user to update loadbalancerSourceRanges
// Only allow removing LoadBalancerSourceRanges when change service type from LoadBalancer
// to non-LoadBalancer or adding LoadBalancerSourceRanges when change service type from
// non-LoadBalancer to LoadBalancer.
if service.Spec.Type != api.ServiceTypeLoadBalancer && oldService.Spec.Type != api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeLoadBalancer && oldService.Spec.Type == api.ServiceTypeLoadBalancer {
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
}
allErrs = append(allErrs, validateServiceFields(service)...)
allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)
return allErrs
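
With the immutable-field check above deleted, changing loadBalancerSourceRanges on a Service that remains a LoadBalancer passes update validation; the test hunk below exercises exactly this. As a standalone sketch, assuming a makeValidService()-style helper as used in validation_test.go, and noting that the signature above takes the new Service first:

oldSvc := makeValidService()
oldSvc.Spec.Type = api.ServiceTypeLoadBalancer
oldSvc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/8"}

newSvc := makeValidService()
newSvc.Spec.Type = api.ServiceTypeLoadBalancer
newSvc.Spec.LoadBalancerSourceRanges = []string{"10.180.0.0/16"}

errs := ValidateServiceUpdate(&newSvc, &oldSvc)
// expect len(errs) == 0: the update is now accepted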


@@ -6675,7 +6675,7 @@ func TestValidateServiceUpdate(t *testing.T) {
newSvc.Spec.Type = api.ServiceTypeLoadBalancer
newSvc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/8"}
},
numErrs: 1,
numErrs: 0,
},
{
name: "update loadBalancerSourceRanges",
@@ -6685,7 +6685,7 @@ func TestValidateServiceUpdate(t *testing.T) {
newSvc.Spec.Type = api.ServiceTypeLoadBalancer
newSvc.Spec.LoadBalancerSourceRanges = []string{"10.180.0.0/16"}
},
numErrs: 1,
numErrs: 0,
},
{
name: "LoadBalancer type cannot have None ClusterIP",


@@ -430,6 +430,13 @@ func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
oldService.Spec.Type, newService.Spec.Type)
return true
}
if wantsLoadBalancer(newService) && !reflect.DeepEqual(oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges) {
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "LoadBalancerSourceRanges", "%v -> %v",
oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges)
return true
}
if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity {
return true
}
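
For context, the wantsLoadBalancer guard keeps the new source-range check from firing on non-LoadBalancer Services; roughly, it is just a type check. A paraphrased sketch, not the verbatim helper from the controller:

// Paraphrased sketch of the existing helper referenced above; the real
// definition lives elsewhere in the service controller, outside this diff.
func wantsLoadBalancer(service *v1.Service) bool {
    return service.Spec.Type == v1.ServiceTypeLoadBalancer
}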


@@ -388,6 +388,9 @@ func (proxier *Proxier) sameConfig(info *serviceInfo, service *api.Service, port
if info.onlyNodeLocalEndpoints != onlyNodeLocalEndpoints {
return false
}
if !reflect.DeepEqual(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges) {
return false
}
return true
}
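
Worth noting about the reflect.DeepEqual comparison: it is order-sensitive and treats a nil slice as different from an empty one, so either kind of difference causes sameConfig to report a change. A small standalone illustration (standard library only, not part of the diff):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    var cached []string   // nil: no source ranges recorded yet
    fromAPI := []string{} // empty, but non-nil

    // DeepEqual distinguishes a nil slice from an empty one...
    fmt.Println(reflect.DeepEqual(cached, fromAPI)) // false

    // ...and the comparison is order-sensitive.
    a := []string{"10.0.0.0/8", "10.180.0.0/16"}
    b := []string{"10.180.0.0/16", "10.0.0.0/8"}
    fmt.Println(reflect.DeepEqual(a, b)) // false
}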


@@ -1103,6 +1103,73 @@ var _ = framework.KubeDescribe("Services", func() {
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.name, kubeProxyLagTimeout, stdout)
}
})
It("should only allow access from service loadbalancer source ranges [Slow]", func() {
// this feature is currently supported only on GCE/GKE/AWS
framework.SkipUnlessProviderIs("gce", "gke", "aws")
loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber {
loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge
}
namespace := f.Namespace.Name
serviceName := "lb-sourcerange"
jig := NewServiceTestJig(cs, serviceName)
By("Prepare allow source ips")
// prepare the exec pods
// acceptPod is allowed to access the loadbalancer
acceptPodName := createExecPodOrFail(cs, namespace, "execpod-accept")
dropPodName := createExecPodOrFail(cs, namespace, "execpod-drop")
acceptPod, err := cs.Core().Pods(namespace).Get(acceptPodName)
Expect(err).NotTo(HaveOccurred())
dropPod, err := cs.Core().Pods(namespace).Get(dropPodName)
Expect(err).NotTo(HaveOccurred())
By("creating a pod to be part of the service " + serviceName)
// This container is an nginx container listening on port 80
// See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
jig.RunOrFail(namespace, nil)
// Create loadbalancer service with source range from node[0] and podAccept
svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.LoadBalancerSourceRanges = []string{acceptPod.Status.PodIP + "/32"}
})
// Clean up loadbalancer service
defer func() {
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.LoadBalancerSourceRanges = nil
})
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout)
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
By("check reachability from different sources")
svcIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
checkReachabilityFromPod(true, namespace, acceptPodName, svcIP)
checkReachabilityFromPod(false, namespace, dropPodName, svcIP)
By("Update service LoadBalancerSourceRange and check reachability")
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
// only allow access from dropPod
svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"}
})
checkReachabilityFromPod(false, namespace, acceptPodName, svcIP)
checkReachabilityFromPod(true, namespace, dropPodName, svcIP)
By("Delete LoadBalancerSourceRange field and check reachability")
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.LoadBalancerSourceRanges = nil
})
checkReachabilityFromPod(true, namespace, acceptPodName, svcIP)
checkReachabilityFromPod(true, namespace, dropPodName, svcIP)
})
})
var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", func() {
@@ -2773,3 +2840,21 @@ func describeSvc(ns string) {
"describe", "svc", fmt.Sprintf("--namespace=%v", ns))
framework.Logf(desc)
}
func checkReachabilityFromPod(expectToBeReachable bool, namespace, pod, target string) {
cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
err := wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
_, err := framework.RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil {
framework.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil
}
if !expectToBeReachable && err == nil {
framework.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
}


@@ -444,6 +444,7 @@ Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should create endpoints for unready pods,maisem,0
Services should only allow access from service loadbalancer source ranges,freehan,0
Services should preserve source pod IP for traffic thru service cluster IP,Random-Liu,1
Services should prevent NodePort collisions,bprashanth,0
Services should provide secure master service,bprashanth,0
