Merge pull request #45739 from timothysc/cm_lock

Automatic merge from submit-queue (batch tested with PRs 45374, 44537, 45739, 44474, 45888)

Allow kcm and scheduler to lock on ConfigMaps.

**What this PR does / why we need it**:
Plumbs the ability to lock on ConfigMaps through the kcm and scheduler (see the example invocation below).

**Which issue this PR fixes** 
Fixes: #44857
Addresses issues with: #45415

**Special notes for your reviewer**:

**Release note**:

```
Add --leader-elect-resource-lock support to kcm and scheduler to allow locking on ConfigMaps as well as Endpoints (default)
```
/cc @kubernetes/sig-cluster-lifecycle-pr-reviews @jamiehannaford @bsalamat @mikedanese
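
With this change, both components accept the new flag. A purely illustrative invocation (all other required flags elided):

```
kube-controller-manager --leader-elect=true --leader-elect-resource-lock=configmaps
kube-scheduler --leader-elect=true --leader-elect-resource-lock=configmaps
```

Omitting the flag keeps the existing Endpoints-based lock, since `endpoints` is the default.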
Kubernetes Submit Queue 2017-05-16 18:10:57 -07:00 committed by GitHub
commit 7f92d35f1c
11 changed files with 68 additions and 21 deletions


@@ -32,7 +32,6 @@ import (
 	"strconv"
 	"time"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -192,21 +191,20 @@ func Run(s *options.CMServer) error {
 		return err
 	}
-	// TODO: enable other lock types
-	rl := resourcelock.EndpointsLock{
-		EndpointsMeta: metav1.ObjectMeta{
-			Namespace: "kube-system",
-			Name:      "kube-controller-manager",
-		},
-		Client: leaderElectionClient,
-		LockConfig: resourcelock.ResourceLockConfig{
+	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+		"kube-system",
+		"kube-controller-manager",
+		leaderElectionClient,
+		resourcelock.ResourceLockConfig{
 			Identity:      id,
 			EventRecorder: recorder,
-		},
-	}
+		})
+	if err != nil {
+		glog.Fatalf("error creating lock: %v", err)
+	}
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
-		Lock:          &rl,
+		Lock:          rl,
 		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
 		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
 		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,


@@ -425,7 +425,9 @@ large-cluster-size-threshold
 last-release-pr
 leader-elect
 leader-elect-lease-duration
+leader-elect-lock-type
 leader-elect-renew-deadline
+leader-elect-resource-lock
 leader-elect-retry-period
 lease-duration
 leave-stdin-open


@@ -685,6 +685,9 @@ type LeaderElectionConfiguration struct {
 	// acquisition and renewal of a leadership. This is only applicable if
 	// leader election is enabled.
 	RetryPeriod metav1.Duration
+	// resourceLock indicates the resource object type that will be used to lock
+	// during leader election cycles.
+	ResourceLock string
 }
 
 type KubeControllerManagerConfiguration struct {


@@ -24,6 +24,7 @@ go_library(
 		"//pkg/api:go_default_library",
 		"//pkg/api/v1:go_default_library",
 		"//pkg/apis/componentconfig:go_default_library",
+		"//pkg/client/leaderelection/resourcelock:go_default_library",
 		"//pkg/kubelet/qos:go_default_library",
 		"//pkg/kubelet/types:go_default_library",
 		"//pkg/master/ports:go_default_library",


@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/kubernetes/pkg/api"
+	rl "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/master/ports"
@@ -193,6 +194,9 @@ func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
 	if obj.RetryPeriod == zero {
 		obj.RetryPeriod = metav1.Duration{Duration: 2 * time.Second}
 	}
+	if obj.ResourceLock == "" {
+		obj.ResourceLock = rl.EndpointsResourceLock
+	}
 }
 
 func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {


@@ -233,6 +233,9 @@ type LeaderElectionConfiguration struct {
 	// acquisition and renewal of a leadership. This is only applicable if
 	// leader election is enabled.
 	RetryPeriod metav1.Duration `json:"retryPeriod"`
+	// resourceLock indicates the resource object type that will be used to lock
+	// during leader election cycles.
+	ResourceLock string `json:"resourceLock"`
 }
 
 // A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true:


@@ -814,6 +814,7 @@ func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(
 	out.LeaseDuration = in.LeaseDuration
 	out.RenewDeadline = in.RenewDeadline
 	out.RetryPeriod = in.RetryPeriod
+	out.ResourceLock = in.ResourceLock
 	return nil
 }
@@ -829,6 +830,7 @@ func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(
 	out.LeaseDuration = in.LeaseDuration
 	out.RenewDeadline = in.RenewDeadline
 	out.RetryPeriod = in.RetryPeriod
+	out.ResourceLock = in.ResourceLock
 	return nil
 }


@@ -284,6 +284,7 @@ func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration {
 		LeaseDuration: metav1.Duration{Duration: DefaultLeaseDuration},
 		RenewDeadline: metav1.Duration{Duration: DefaultRenewDeadline},
 		RetryPeriod:   metav1.Duration{Duration: DefaultRetryPeriod},
+		ResourceLock:  rl.EndpointsResourceLock,
 	}
 }
@@ -306,4 +307,7 @@ func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) {
 	fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+
 		"The duration the clients should wait between attempting acquisition and renewal "+
 		"of a leadership. This is only applicable if leader election is enabled.")
+	fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+
+		"The type of resource object that is used for locking during "+
+		"leader election. Supported options are `endpoints` (default) and `configmaps`.")
 }


@@ -17,6 +17,7 @@ go_library(
 	tags = ["automanaged"],
 	deps = [
 		"//pkg/api/v1:go_default_library",
+		"//pkg/client/clientset_generated/clientset:go_default_library",
 		"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
 		"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//vendor/k8s.io/client-go/tools/record:go_default_library",


@@ -17,12 +17,17 @@ limitations under the License.
 
 package resourcelock
 
 import (
+	"fmt"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/record"
+	cs "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 )
 
 const (
 	LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
+	EndpointsResourceLock             = "endpoints"
+	ConfigMapsResourceLock            = "configmaps"
 )
 
 // LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -69,3 +74,29 @@ type Interface interface {
 	// into a string
 	Describe() string
 }
+
+// New will create a lock of a given type according to the input parameters
+func New(lockType string, ns string, name string, client *cs.Clientset, rlc ResourceLockConfig) (Interface, error) {
+	switch lockType {
+	case EndpointsResourceLock:
+		return &EndpointsLock{
+			EndpointsMeta: metav1.ObjectMeta{
+				Namespace: ns,
+				Name:      name,
+			},
+			Client:     client,
+			LockConfig: rlc,
+		}, nil
+	case ConfigMapsResourceLock:
+		return &ConfigMapLock{
+			ConfigMapMeta: metav1.ObjectMeta{
+				Namespace: ns,
+				Name:      name,
+			},
+			Client:     client,
+			LockConfig: rlc,
+		}, nil
+	default:
+		return nil, fmt.Errorf("invalid lock-type %s", lockType)
+	}
+}
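
For illustration, a minimal sketch of how a caller might obtain a ConfigMap-based lock from this factory. The package, helper name, namespace, and lock name here are hypothetical (not part of this PR); the caller is assumed to already have a clientset, an identity string, and an event recorder:

```go
package lockexample // hypothetical caller, not part of this PR

import (
	"k8s.io/client-go/tools/record"
	cs "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
)

// newConfigMapLock builds a ConfigMap-backed resource lock via the new factory.
// "kube-system"/"my-component" identify the ConfigMap used as the lock object.
func newConfigMapLock(client *cs.Clientset, id string, recorder record.EventRecorder) (resourcelock.Interface, error) {
	return resourcelock.New(resourcelock.ConfigMapsResourceLock,
		"kube-system",
		"my-component",
		client,
		resourcelock.ResourceLockConfig{
			Identity:      id,       // unique per-candidate identity
			EventRecorder: recorder, // records leader-transition events
		})
}
```

The returned Interface plugs directly into leaderelection.RunOrDie via the Lock field, exactly as the kcm and scheduler hunks in this diff do.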


@@ -26,7 +26,6 @@ import (
 	goruntime "runtime"
 	"strconv"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/server/healthz"
 	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
@@ -112,17 +111,16 @@ func Run(s *options.SchedulerServer) error {
 		return fmt.Errorf("unable to get hostname: %v", err)
 	}
-	// TODO: enable other lock types
-	rl := &resourcelock.EndpointsLock{
-		EndpointsMeta: metav1.ObjectMeta{
-			Namespace: s.LockObjectNamespace,
-			Name:      s.LockObjectName,
-		},
-		Client: kubecli,
-		LockConfig: resourcelock.ResourceLockConfig{
+	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+		s.LockObjectNamespace,
+		s.LockObjectName,
+		kubecli,
+		resourcelock.ResourceLockConfig{
 			Identity:      id,
 			EventRecorder: recorder,
-		},
-	}
+		})
+	if err != nil {
+		glog.Fatalf("error creating lock: %v", err)
+	}
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{