Merge pull request #98257 from lingsamuel/etcd-lease-max-count

lease manager limit max objects attached to a lease
commit f81220975e
Kubernetes Prow Robot 2021-02-01 14:52:27 -08:00, committed by GitHub
3 changed files with 64 additions and 14 deletions
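What changed, in brief: the etcd3 lease manager reuses one lease for many objects to cut down on lease-grant and keepalive traffic, and this PR caps that reuse at MaxObjectCount objects per lease; once the counter passes the cap, GetLease grants a fresh lease and resets the counter. Below is a minimal standalone sketch of that decision (toy code, not the real implementation: names mirror the diff, while the etcd client, locking, and error handling are omitted, and the simulated grant adds the reuse window to the TTL as the real manager does).

package main

import (
	"fmt"
	"time"
)

type leaseManager struct {
	prevLeaseID                 int64
	prevLeaseExpirationTime     time.Time
	leaseReuseDurationSeconds   int64
	leaseReuseDurationPercent   float64
	leaseMaxAttachedObjectCount int64
	leaseAttachedObjectCount    int64
	nextID                      int64 // stand-in for etcd's Grant response
}

// getLease mirrors the structure of GetLease in the diff: reuse the previous
// lease only while it is still valid, has enough remaining reuse window, and
// has not exceeded the attached-object cap.
func (l *leaseManager) getLease(ttl int64) int64 {
	now := time.Now()
	// Reuse window: the smaller of the fixed duration and a percentage of TTL.
	reuse := l.leaseReuseDurationSeconds
	if pct := int64(l.leaseReuseDurationPercent * float64(ttl)); pct < reuse {
		reuse = pct
	}
	valid := now.Add(time.Duration(ttl) * time.Second).Before(l.prevLeaseExpirationTime)
	sufficient := now.Add(time.Duration(ttl+reuse) * time.Second).After(l.prevLeaseExpirationTime)

	// Each call attaches exactly one object to whichever lease is returned.
	l.leaseAttachedObjectCount++
	if valid && sufficient && l.leaseAttachedObjectCount <= l.leaseMaxAttachedObjectCount {
		return l.prevLeaseID
	}

	// "Grant" a new lease with a little extra TTL and reset the counter:
	// the fresh lease carries this call's one object.
	ttl += reuse
	l.nextID++
	l.prevLeaseID = l.nextID
	l.prevLeaseExpirationTime = now.Add(time.Duration(ttl) * time.Second)
	l.leaseAttachedObjectCount = 1
	return l.prevLeaseID
}

func main() {
	m := &leaseManager{
		leaseReuseDurationSeconds:   60,
		leaseReuseDurationPercent:   0.05,
		leaseMaxAttachedObjectCount: 2,
	}
	for i := 1; i <= 3; i++ {
		// With the cap at 2, the calls print lease 1, lease 1, then lease 2.
		fmt.Printf("call %d -> lease %d (attached: %d)\n", i, m.getLease(120), m.leaseAttachedObjectCount)
	}
}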


@@ -164,6 +164,7 @@ func TestAddFlags(t *testing.T) {
 			HealthcheckTimeout: storagebackend.DefaultHealthcheckTimeout,
 			LeaseManagerConfig: etcd3.LeaseManagerConfig{
 				ReuseDurationSeconds: 100,
+				MaxObjectCount:       1000,
 			},
 		},
 		DefaultStorageMediaType: "application/vnd.kubernetes.protobuf",
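This first file is the options test: after defaulting, the completed etcd options are expected to carry the new cap. A hedged sketch of that expected fragment as plain Go (assuming, as the test suggests, a storagebackend.Config with a LeaseManagerConfig field; the surrounding options wiring is omitted):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

func main() {
	// Fragment of the expected completed config from the test above.
	cfg := storagebackend.Config{
		HealthcheckTimeout: storagebackend.DefaultHealthcheckTimeout,
		LeaseManagerConfig: etcd3.LeaseManagerConfig{
			ReuseDurationSeconds: 100,
			MaxObjectCount:       1000,
		},
	}
	fmt.Printf("%+v\n", cfg)
}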


@@ -23,24 +23,26 @@ import (
 	"go.etcd.io/etcd/clientv3"
 	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
-	"k8s.io/klog/v2"
 )

 const (
 	defaultLeaseReuseDurationSeconds = 60
-	largeLeaseThreshold              = 5000
+	defaultLeaseMaxObjectCount       = 1000
 )

 // LeaseManagerConfig is configuration for creating a lease manager.
 type LeaseManagerConfig struct {
 	// ReuseDurationSeconds specifies the time in seconds that each lease is reused
 	ReuseDurationSeconds int64
+	// MaxObjectCount specifies how many objects a lease can attach
+	MaxObjectCount int64
 }

 // NewDefaultLeaseManagerConfig creates a LeaseManagerConfig with default values
 func NewDefaultLeaseManagerConfig() LeaseManagerConfig {
 	return LeaseManagerConfig{
 		ReuseDurationSeconds: defaultLeaseReuseDurationSeconds,
+		MaxObjectCount:       defaultLeaseMaxObjectCount,
 	}
 }
@@ -57,24 +59,29 @@ type leaseManager struct {
 	// The period of time in seconds and percent of TTL that each lease is
 	// reused. The minimum of them is used to avoid unreasonably large
 	// numbers.
-	leaseReuseDurationSeconds int64
-	leaseReuseDurationPercent float64
-	leaseAttachedObjectCount  int64
+	leaseReuseDurationSeconds   int64
+	leaseReuseDurationPercent   float64
+	leaseMaxAttachedObjectCount int64
+	leaseAttachedObjectCount    int64
 }

 // newDefaultLeaseManager creates a new lease manager using default settings.
 func newDefaultLeaseManager(client *clientv3.Client, config LeaseManagerConfig) *leaseManager {
-	return newLeaseManager(client, config.ReuseDurationSeconds, 0.05)
+	if config.MaxObjectCount <= 0 {
+		config.MaxObjectCount = defaultLeaseMaxObjectCount
+	}
+	return newLeaseManager(client, config.ReuseDurationSeconds, 0.05, config.MaxObjectCount)
 }

 // newLeaseManager creates a new lease manager with the given lease reuse
 // duration in seconds and percentage, and the maximum number of objects
 // attached per lease. The percentage value x means x*100%.
-func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64) *leaseManager {
+func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64, maxObjectCount int64) *leaseManager {
 	return &leaseManager{
-		client:                    client,
-		leaseReuseDurationSeconds: leaseReuseDurationSeconds,
-		leaseReuseDurationPercent: leaseReuseDurationPercent,
+		client:                      client,
+		leaseReuseDurationSeconds:   leaseReuseDurationSeconds,
+		leaseReuseDurationPercent:   leaseReuseDurationPercent,
+		leaseMaxAttachedObjectCount: maxObjectCount,
 	}
 }
@@ -93,7 +100,7 @@ func (l *leaseManager) GetLease(ctx context.Context, ttl int64) (clientv3.LeaseID, error) {
 	// Currently each GetLease call only attaches 1 object
 	l.leaseAttachedObjectCount++
-	if valid && sufficient {
+	if valid && sufficient && l.leaseAttachedObjectCount <= l.leaseMaxAttachedObjectCount {
 		return l.prevLeaseID, nil
 	}
@@ -107,9 +114,6 @@ func (l *leaseManager) GetLease(ctx context.Context, ttl int64) (clientv3.LeaseID, error) {
 	l.prevLeaseID = lcr.ID
 	l.prevLeaseExpirationTime = now.Add(time.Duration(ttl) * time.Second)
 	// refresh count
-	if l.leaseAttachedObjectCount > largeLeaseThreshold {
-		klog.Infof("The object count for lease %x is large: %v", l.prevLeaseID, l.leaseAttachedObjectCount)
-	}
 	metrics.UpdateLeaseObjectCount(l.leaseAttachedObjectCount)
 	l.leaseAttachedObjectCount = 1
 	return lcr.ID, nil
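Caller-side, the new knob rides on LeaseManagerConfig. A short sketch of constructing it (NewDefaultLeaseManagerConfig and the struct fields are taken from the diff above; the override value is made up for illustration):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	// Defaults per the diff: ReuseDurationSeconds 60, MaxObjectCount 1000.
	cfg := etcd3.NewDefaultLeaseManagerConfig()

	// An explicit override (illustrative value). newDefaultLeaseManager
	// treats any MaxObjectCount <= 0 as unset and falls back to the default,
	// so the cap cannot be disabled by misconfiguration.
	cfg.MaxObjectCount = 5000
	fmt.Printf("%+v\n", cfg)
}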


@@ -1974,6 +1974,7 @@ func testSetup(t *testing.T) (context.Context, *store, *integration.ClusterV3) {
 	// for testing purposes. See apimachinery/pkg/util/wait/wait.go
 	store := newStore(cluster.RandClient(), codec, newPod, "", &prefixTransformer{prefix: []byte(defaultTestPrefix)}, true, LeaseManagerConfig{
 		ReuseDurationSeconds: 1,
+		MaxObjectCount:       defaultLeaseMaxObjectCount,
 	})
 	ctx := context.Background()
 	return ctx, store, cluster
@@ -2283,3 +2284,47 @@ func TestCount(t *testing.T) {
 		t.Fatalf("store.Count for resource %s: expected %d but got %d", resourceA, resourceACountExpected, resourceACountGot)
 	}
 }
+
+func TestLeaseMaxObjectCount(t *testing.T) {
+	codec := apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion)
+	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	store := newStore(cluster.RandClient(), codec, newPod, "", &prefixTransformer{prefix: []byte(defaultTestPrefix)}, true, LeaseManagerConfig{
+		ReuseDurationSeconds: defaultLeaseReuseDurationSeconds,
+		MaxObjectCount:       2,
+	})
+	ctx := context.Background()
+	defer cluster.Terminate(t)
+
+	obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", SelfLink: "testlink"}}
+	out := &example.Pod{}
+
+	testCases := []struct {
+		key                 string
+		expectAttachedCount int64
+	}{
+		{
+			key:                 "testkey1",
+			expectAttachedCount: 1,
+		},
+		{
+			key:                 "testkey2",
+			expectAttachedCount: 2,
+		},
+		{
+			key: "testkey3",
+			// Each call attaches 1 object to the lease, so after a new
+			// lease is granted the recorded count is reset to 1.
+			expectAttachedCount: 1,
+		},
+	}
+
+	for _, tc := range testCases {
+		err := store.Create(ctx, tc.key, obj, out, 120)
+		if err != nil {
+			t.Fatalf("Create failed: %v", err)
+		}
+		if store.leaseManager.leaseAttachedObjectCount != tc.expectAttachedCount {
+			t.Errorf("lease manager recorded attached object count %v, want %v", store.leaseManager.leaseAttachedObjectCount, tc.expectAttachedCount)
+		}
+	}
+}
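The expected counts follow directly from the counting rule in GetLease: the counter is incremented before the cap check, and a grant resets it to 1 because the granting call's object rides on the new lease. A tiny standalone trace of that arithmetic for MaxObjectCount = 2 (toy code, no etcd involved):

package main

import "fmt"

func main() {
	const maxObjects = 2
	lease, count := 1, 0
	for call := 1; call <= 3; call++ {
		count++ // each Create attaches exactly one object
		if count > maxObjects {
			lease++   // the attach that exceeds the cap triggers a new lease...
			count = 1 // ...and the counter restarts with this call's object
		}
		// Prints lease 1/count 1, lease 1/count 2, lease 2/count 1,
		// matching the three test cases above.
		fmt.Printf("create testkey%d -> lease %d, recorded count %d\n", call, lease, count)
	}
}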