Sticky Session Implementation
- Added a process to clean up stale session affinity records (a usage sketch of the new calls follows below).
- Automatically set the cloud-provided load balancer to use sticky sessions if the service requires it. Note: this only works on GCE right now.
- Changed sessionAffinityMap to a map of pointers instead of structs to improve performance.
- Commented out the cookie and protocol fields in sessionAffinityDetail to avoid confusion, as they are not yet implemented.
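Below is a minimal, hypothetical sketch of how the API introduced by this commit is meant to be driven, based only on the functions added in the diff. The service name, client address, and 30 minute sticky window are invented for illustration, and api.AffinityTypeClientIP is assumed to be the non-None affinity value in the api package; endpoint population is elided because it normally arrives through OnUpdate from the endpoints watch.

package main

import (
	"fmt"
	"net"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy"
)

func main() {
	lb := proxy.NewLoadBalancerRR()

	// Register client-IP affinity with a 30 minute sticky window.
	// Passing 0 falls back to the 180 minute default inside NewService.
	// api.AffinityTypeClientIP is assumed to be the client-IP affinity constant.
	lb.NewService("myservice", api.AffinityTypeClientIP, 30)

	// In the real proxy, lb.OnUpdate(...) fills in the endpoints for
	// "myservice" from the endpoints watch; it is omitted here, so this
	// call returns ErrMissingEndpoints until endpoints exist.
	client := &net.TCPAddr{IP: net.ParseIP("10.0.0.5"), Port: 12345}
	endpoint, err := lb.NextEndpoint("myservice", client)
	fmt.Println(endpoint, err)

	// Some periodic caller is expected to evict affinity records that have
	// not been used within the sticky window.
	lb.CleanupStaleStickySessions("myservice")
}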
@@ -22,6 +22,7 @@ import (
 	"reflect"
 	"strconv"
 	"sync"
+	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/golang/glog"
@@ -32,27 +33,79 @@ var (
 	ErrMissingEndpoints = errors.New("missing endpoints")
 )
 
+type sessionAffinityDetail struct {
+	clientIPAddress string
+	//clientProtocol api.Protocol //not yet used
+	//sessionCookie  string       //not yet used
+	endpoint     string
+	lastUsedDTTM time.Time
+}
+
+type serviceDetail struct {
+	name                string
+	sessionAffinityType api.AffinityType
+	sessionAffinityMap  map[string]*sessionAffinityDetail
+	stickyMaxAgeMinutes int
+}
+
 // LoadBalancerRR is a round-robin load balancer.
 type LoadBalancerRR struct {
-	lock         sync.RWMutex
-	endpointsMap map[string][]string
-	rrIndex      map[string]int
+	lock          sync.RWMutex
+	endpointsMap  map[string][]string
+	rrIndex       map[string]int
+	serviceDtlMap map[string]serviceDetail
 }
 
+func newServiceDetail(service string, sessionAffinityType api.AffinityType, stickyMaxAgeMinutes int) *serviceDetail {
+	return &serviceDetail{
+		name:                service,
+		sessionAffinityType: sessionAffinityType,
+		sessionAffinityMap:  make(map[string]*sessionAffinityDetail),
+		stickyMaxAgeMinutes: stickyMaxAgeMinutes,
+	}
+}
+
 // NewLoadBalancerRR returns a new LoadBalancerRR.
 func NewLoadBalancerRR() *LoadBalancerRR {
 	return &LoadBalancerRR{
-		endpointsMap: make(map[string][]string),
-		rrIndex:      make(map[string]int),
+		endpointsMap:  make(map[string][]string),
+		rrIndex:       make(map[string]int),
+		serviceDtlMap: make(map[string]serviceDetail),
 	}
 }
 
+func (lb *LoadBalancerRR) NewService(service string, sessionAffinityType api.AffinityType, stickyMaxAgeMinutes int) error {
+	if stickyMaxAgeMinutes == 0 {
+		stickyMaxAgeMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimeted instead????
+	}
+	if _, exists := lb.serviceDtlMap[service]; !exists {
+		lb.serviceDtlMap[service] = *newServiceDetail(service, sessionAffinityType, stickyMaxAgeMinutes)
+		glog.V(4).Infof("NewService. Service does not exist. So I created it: %+v", lb.serviceDtlMap[service])
+	}
+	return nil
+}
+
+// return true if this service detail is using some form of session affinity.
+func isSessionAffinity(serviceDtl serviceDetail) bool {
+	//Should never be empty string, but chekcing for it to be safe.
+	if serviceDtl.sessionAffinityType == "" || serviceDtl.sessionAffinityType == api.AffinityTypeNone {
+		return false
+	}
+	return true
+}
+
 // NextEndpoint returns a service endpoint.
 // The service endpoint is chosen using the round-robin algorithm.
 func (lb *LoadBalancerRR) NextEndpoint(service string, srcAddr net.Addr) (string, error) {
+	var ipaddr string
+	glog.V(4).Infof("NextEndpoint. service: %s. srcAddr: %+v. Endpoints: %+v", service, srcAddr, lb.endpointsMap)
+
 	lb.lock.RLock()
-	endpoints, exists := lb.endpointsMap[service]
+	serviceDtls, exists := lb.serviceDtlMap[service]
+	endpoints, _ := lb.endpointsMap[service]
 	index := lb.rrIndex[service]
+	sessionAffinityEnabled := isSessionAffinity(serviceDtls)
+
 	lb.lock.RUnlock()
 	if !exists {
 		return "", ErrMissingServiceEntry
@@ -60,9 +113,37 @@ func (lb *LoadBalancerRR) NextEndpoint(service string, srcAddr net.Addr) (string
 	if len(endpoints) == 0 {
 		return "", ErrMissingEndpoints
 	}
+	if sessionAffinityEnabled {
+		if _, _, err := net.SplitHostPort(srcAddr.String()); err == nil {
+			ipaddr, _, _ = net.SplitHostPort(srcAddr.String())
+		}
+		sessionAffinity, exists := serviceDtls.sessionAffinityMap[ipaddr]
+		glog.V(4).Infof("NextEndpoint. Key: %s. sessionAffinity: %+v", ipaddr, sessionAffinity)
+		if exists && int(time.Now().Sub(sessionAffinity.lastUsedDTTM).Minutes()) < serviceDtls.stickyMaxAgeMinutes {
+			endpoint := sessionAffinity.endpoint
+			sessionAffinity.lastUsedDTTM = time.Now()
+			glog.V(4).Infof("NextEndpoint. Key: %s. sessionAffinity: %+v", ipaddr, sessionAffinity)
+			return endpoint, nil
+		}
+	}
 	endpoint := endpoints[index]
 	lb.lock.Lock()
 	lb.rrIndex[service] = (index + 1) % len(endpoints)
+
+	if sessionAffinityEnabled {
+		var affinity *sessionAffinityDetail
+		affinity, _ = lb.serviceDtlMap[service].sessionAffinityMap[ipaddr]
+		if affinity == nil {
+			affinity = new(sessionAffinityDetail) //&sessionAffinityDetail{ipaddr, "TCP", "", endpoint, time.Now()}
+			lb.serviceDtlMap[service].sessionAffinityMap[ipaddr] = affinity
+		}
+		affinity.lastUsedDTTM = time.Now()
+		affinity.endpoint = endpoint
+		affinity.clientIPAddress = ipaddr
+
+		glog.V(4).Infof("NextEndpoint. New Affinity key %s: %+v", ipaddr, lb.serviceDtlMap[service].sessionAffinityMap[ipaddr])
+	}
+
 	lb.lock.Unlock()
 	return endpoint, nil
 }
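The affinity branch above keys records by client IP only (the port is stripped with net.SplitHostPort), so repeated lookups from the same source address inside the sticky window bypass round robin and return the recorded endpoint. A hedged, test-style sketch of that behaviour, written as if it sat in the proxy package so it can seed the unexported endpointsMap directly; the test name, service name, and addresses are invented, and api.AffinityTypeClientIP is assumed to be the non-None affinity value.

package proxy

import (
	"net"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)

func TestStickyClientGetsSameEndpoint(t *testing.T) {
	lb := NewLoadBalancerRR()
	lb.NewService("svc", api.AffinityTypeClientIP, 30)
	// Seed endpoints directly rather than going through OnUpdate.
	lb.endpointsMap["svc"] = []string{"10.0.0.1:80", "10.0.0.2:80"}

	client := &net.TCPAddr{IP: net.ParseIP("10.0.0.5"), Port: 12345}
	first, err := lb.NextEndpoint("svc", client)
	if err != nil {
		t.Fatal(err)
	}
	second, err := lb.NextEndpoint("svc", client)
	if err != nil {
		t.Fatal(err)
	}
	// Round robin alone would rotate endpoints, but the affinity record
	// created on the first call pins this client to the same endpoint.
	if first != second {
		t.Errorf("expected sticky endpoint %q, got %q", first, second)
	}
}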
@@ -89,6 +170,35 @@ func filterValidEndpoints(endpoints []string) []string {
 	return result
 }
 
+//remove any session affinity records associated to a particular endpoint (for example when a pod goes down).
+func removeSessionAffinityByEndpoint(lb *LoadBalancerRR, service string, endpoint string) {
+	for _, affinityDetail := range lb.serviceDtlMap[service].sessionAffinityMap {
+		if affinityDetail.endpoint == endpoint {
+			glog.V(4).Infof("Removing client: %s from sessionAffinityMap for service: %s", affinityDetail.endpoint, service)
+			delete(lb.serviceDtlMap[service].sessionAffinityMap, affinityDetail.clientIPAddress)
+		}
+	}
+}
+
+//Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
+// Then remove any session affinity records that are not in both lists.
+func updateServiceDetailMap(lb *LoadBalancerRR, service string, validEndpoints []string) {
+	allEndpoints := map[string]int{}
+	for _, validEndpoint := range validEndpoints {
+		allEndpoints[validEndpoint] = 1
+	}
+	for _, existingEndpoint := range lb.endpointsMap[service] {
+		allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
+	}
+	for mKey, mVal := range allEndpoints {
+		if mVal == 1 {
+			glog.V(3).Infof("Delete endpoint %s for service: %s", mKey, service)
+			removeSessionAffinityByEndpoint(lb, service, mKey)
+			delete(lb.serviceDtlMap[service].sessionAffinityMap, mKey)
+		}
+	}
+}
+
 // OnUpdate manages the registered service endpoints.
 // Registered endpoints are updated if found in the update set or
 // unregistered if missing from the update set.
@@ -102,7 +212,13 @@ func (lb *LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
 		validEndpoints := filterValidEndpoints(endpoint.Endpoints)
 		if !exists || !reflect.DeepEqual(existingEndpoints, validEndpoints) {
 			glog.V(3).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", endpoint.Name, endpoint.Endpoints)
+			updateServiceDetailMap(lb, endpoint.Name, validEndpoints)
+			// On update can be called without NewService being called externally.
+			// to be safe we will call it here. A new service will only be created
+			// if one does not already exist.
+			lb.NewService(endpoint.Name, api.AffinityTypeNone, 0)
 			lb.endpointsMap[endpoint.Name] = validEndpoints
+
 			// Reset the round-robin index.
 			lb.rrIndex[endpoint.Name] = 0
 		}
@@ -113,6 +229,17 @@ func (lb *LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
 		if _, exists := registeredEndpoints[k]; !exists {
 			glog.V(3).Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", k, v)
 			delete(lb.endpointsMap, k)
+			delete(lb.serviceDtlMap, k)
 		}
 	}
 }
+
+func (lb *LoadBalancerRR) CleanupStaleStickySessions(service string) {
+	stickyMaxAgeMinutes := lb.serviceDtlMap[service].stickyMaxAgeMinutes
+	for key, affinityDetail := range lb.serviceDtlMap[service].sessionAffinityMap {
+		if int(time.Now().Sub(affinityDetail.lastUsedDTTM).Minutes()) >= stickyMaxAgeMinutes {
+			glog.V(4).Infof("Removing client: %s from sessionAffinityMap for service: %s. Last used is greater than %d minutes....", affinityDetail.clientIPAddress, service, stickyMaxAgeMinutes)
+			delete(lb.serviceDtlMap[service].sessionAffinityMap, key)
+		}
+	}
+}