Mirror of https://github.com/rancher/dynamiclistener.git (synced 2025-09-09 17:09:52 +00:00)

Compare commits: 1 commit on branch renovate/m... (tip eb8b3cb89f)
.github/renovate.json (vendored): 2 changes

```diff
@@ -2,7 +2,7 @@
   "extends": [
     "github>rancher/renovate-config#release"
   ],
-  "baseBranches": [
+  "baseBranchPatterns": [
    "main",
    "release/v0.3",
    "release/v0.4",
```
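The single commit on this branch is the config migration above: newer Renovate releases renamed `baseBranches` to `baseBranchPatterns`, and this appears to be Renovate's own automated config migration. The remaining hunks are an artifact of where the branch was cut: the compare shows the default branch's newer state on the `-` side and the branch's older base on the `+` side, which is why the workflow pins, Go dependencies, and the kubernetes secret storage controller below all appear to move backwards.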
.github/workflows/ci.yaml (vendored): 2 changes

```diff
@@ -11,7 +11,7 @@ jobs:
     steps:
     - name: Checkout code
       # https://github.com/actions/checkout/releases/tag/VERSION
-      uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
     - name: Install Go
       # https://github.com/actions/setup-go/releases/tag/VERSION
       uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
```
.github/workflows/release.yaml (vendored): 2 changes

```diff
@@ -12,7 +12,7 @@ jobs:
       contents: write
     steps:
     - name : Checkout repository
-      uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

    - name: Create release on Github
      run: |
```
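Both workflows pin actions to a full commit SHA and record the corresponding release tag in a trailing comment, so the hash and the comment always change together in one hunk; the branch side simply still carries the older v4.2.2 pin.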
CODEOWNERS (file header lost in the mirror): 1 change

```diff
@@ -1 +0,0 @@
-* @rancher/rancher-squad-frameworks
```
factory.go (file header lost in the mirror; hunk truncated): removal of the IsExpired helper

```diff
@@ -195,10 +195,6 @@ func (t *TLS) generateCert(secret *v1.Secret, cn ...string) (*v1.Secret, bool, e
 }
 
-func (t *TLS) IsExpired(secret *v1.Secret) bool {
-	if secret == nil {
-		return false
-	}
-
-	certsPem := secret.Data[v1.TLSCertKey]
-	if len(certsPem) == 0 {
-		return false
```
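The removed IsExpired helper is cut off in the mirror; the visible prologue returns false for a nil secret or an empty `secret.Data[v1.TLSCertKey]` PEM bundle. A sketch of a conventional expiry check with the same guard structure, using only the standard library (everything past the guards is an assumption, not the repository's exact code):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// isExpiredPEM reports whether any certificate in the PEM bundle has passed
// its NotAfter date. Mirroring the guards visible in the diff, empty input is
// treated as "not expired" so callers fall through to other checks.
func isExpiredPEM(certsPem []byte, now time.Time) bool {
	if len(certsPem) == 0 {
		return false
	}
	for block, rest := pem.Decode(certsPem); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return false // unparseable input is a different failure mode, handled elsewhere
		}
		if now.After(cert.NotAfter) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isExpiredPEM(nil, time.Now())) // false: empty input
}
```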
go.mod: 12 changes

```diff
@@ -8,7 +8,7 @@ require (
 	github.com/rancher/wrangler/v3 v3.2.2-rc.3
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.10.0
-	golang.org/x/crypto v0.42.0
+	golang.org/x/crypto v0.36.0
 	k8s.io/api v0.33.1
 	k8s.io/apimachinery v0.33.1
 	k8s.io/client-go v0.33.1
@@ -43,12 +43,12 @@ require (
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/rancher/lasso v0.2.3-rc3 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/net v0.43.0 // indirect
+	golang.org/x/net v0.38.0 // indirect
 	golang.org/x/oauth2 v0.27.0 // indirect
-	golang.org/x/sync v0.17.0 // indirect
-	golang.org/x/sys v0.36.0 // indirect
-	golang.org/x/term v0.35.0 // indirect
-	golang.org/x/text v0.29.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
```
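go.sum tracks go.mod in lockstep: every module version contributes two lines, an h1: hash of the module tree and a hash of its go.mod file. Seven golang.org/x modules change here (the six bumps above plus golang.org/x/tools), and 7 modules x (one removed pair + one added pair) = 28, which accounts exactly for the 28-line go.sum hunk below.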
go.sum: 28 changes

```diff
@@ -100,43 +100,43 @@ go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
-golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
 golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
-golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
-golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
-golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
```
storage/file (file header lost in the mirror): 2 changes

```diff
@@ -32,7 +32,7 @@ func (s *storage) Get() (*v1.Secret, error) {
 }
 
 func (s *storage) Update(secret *v1.Secret) error {
-	f, err := os.OpenFile(s.file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
+	f, err := os.Create(s.file)
 	if err != nil {
 		return err
 	}
```
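The os.OpenFile call on the `-` side is behaviorally the same as os.Create except for the permission bits: os.Create is defined as OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666), so a freshly created secret file can end up world-readable after the umask is applied, whereas the newer code requests 0600 at creation time. A small sketch contrasting the two; the file names are illustrative:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.Create == os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666):
	// with the common 022 umask the file lands on disk as 0644, world-readable.
	f1, err := os.Create("/tmp/secret-loose.json")
	if err != nil {
		panic(err)
	}
	f1.Close()

	// Requesting 0600 keeps the TLS key readable by the owner only; the umask
	// can narrow permissions further but never widen them past this.
	f2, err := os.OpenFile("/tmp/secret-tight.json", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		panic(err)
	}
	f2.Close()

	for _, name := range []string{"/tmp/secret-loose.json", "/tmp/secret-tight.json"} {
		info, err := os.Stat(name)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s %s\n", info.Mode(), name) // e.g. -rw-r--r-- vs -rw-------
	}
}
```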
storage/kubernetes/controller.go (file header lost in the mirror):

```diff
@@ -2,47 +2,33 @@ package kubernetes
 
 import (
 	"context"
-	"maps"
+	"sync"
 	"time"
 
 	"github.com/rancher/dynamiclistener"
 	"github.com/rancher/dynamiclistener/cert"
 	"github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
 	v1controller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
+	"github.com/rancher/wrangler/v3/pkg/start"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/tools/cache"
-	toolswatch "k8s.io/client-go/tools/watch"
 	"k8s.io/client-go/util/retry"
-	"k8s.io/client-go/util/workqueue"
 )
 
 type CoreGetter func() *core.Factory
 
-type storage struct {
-	namespace, name string
-	storage         dynamiclistener.TLSStorage
-	secrets         v1controller.SecretController
-	tls             dynamiclistener.TLSFactory
-	queue           workqueue.TypedInterface[string]
-	queuedSecret    *v1.Secret
-}
-
 func Load(ctx context.Context, secrets v1controller.SecretController, namespace, name string, backing dynamiclistener.TLSStorage) dynamiclistener.TLSStorage {
 	storage := &storage{
 		name:      name,
 		namespace: namespace,
 		storage:   backing,
-		queue:     workqueue.NewTyped[string](),
+		ctx:       ctx,
+		initSync:  &sync.Once{},
 	}
-	storage.runQueue()
-	storage.init(ctx, secrets)
+	storage.init(secrets)
 	return storage
 }
```
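On the `-` (default branch) side, all writes to the secret are funneled through a typed workqueue consumed by a single worker, which serializes updates and decouples them from the watch goroutine (the runQueue/processQueue pair appears in a later hunk). A minimal, standalone sketch of that queue pattern; the item value is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A typed queue deduplicates an item that is re-added while still pending,
	// so bursts of events usually collapse into a single unit of work.
	queue := workqueue.NewTyped[string]()

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			key, shutdown := queue.Get()
			if shutdown {
				return // ShutDown was called and the queue has drained
			}
			fmt.Println("processing", key)
			queue.Done(key) // mark finished so the item may be re-queued later
		}
	}()

	queue.Add("kube-system/serving-cert")
	queue.Add("kube-system/serving-cert") // usually coalesced while still pending
	queue.ShutDown()                      // let the worker drain, then stop
	<-done
}
```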
```diff
@@ -51,16 +37,16 @@ func New(ctx context.Context, core CoreGetter, namespace, name string, backing d
 		name:      name,
 		namespace: namespace,
 		storage:   backing,
-		queue:     workqueue.NewTyped[string](),
+		ctx:       ctx,
+		initSync:  &sync.Once{},
 	}
-	storage.runQueue()
 
 	// lazy init
 	go func() {
 		wait.PollImmediateUntilWithContext(ctx, time.Second, func(cxt context.Context) (bool, error) {
 			if coreFactory := core(); coreFactory != nil {
-				storage.init(ctx, coreFactory.Core().V1().Secret())
-				return true, nil
+				storage.init(coreFactory.Core().V1().Secret())
+				return true, start.All(ctx, 5, coreFactory)
 			}
 			return false, nil
 		})
```
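Both sides of this hunk initialize lazily: New polls every second until the caller's CoreGetter returns a non-nil factory, so the listener can come up before the Kubernetes client is usable. They differ in who starts the controllers: the older `+` side registers its OnChange handler and then starts the shared controllers via wrangler's start.All(ctx, 5, coreFactory), while the newer `-` side returns true, nil because its dedicated list/watch (next hunk) does not depend on the shared controller machinery being started.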
```diff
@@ -69,94 +55,100 @@ func New(ctx context.Context, core CoreGetter, namespace, name string, backing d
 	return storage
 }
 
-// always return secret from backing storage
-func (s *storage) Get() (*v1.Secret, error) {
-	return s.storage.Get()
-}
+type storage struct {
+	sync.RWMutex
 
-// sync secret to Kubernetes and backing storage via workqueue
-func (s *storage) Update(secret *v1.Secret) error {
-	// Asynchronously update the Kubernetes secret, as doing so inline may block the listener from
-	// accepting new connections if the apiserver becomes unavailable after the Secrets controller
-	// has been initialized.
-	s.queuedSecret = secret
-	s.queue.Add(s.name)
-	return nil
+	namespace, name string
+	storage         dynamiclistener.TLSStorage
+	secrets         v1controller.SecretController
+	ctx             context.Context
+	tls             dynamiclistener.TLSFactory
+	initialized     bool
+	initSync        *sync.Once
 }
 
 func (s *storage) SetFactory(tls dynamiclistener.TLSFactory) {
 	s.Lock()
 	defer s.Unlock()
 	s.tls = tls
 }
 
-func (s *storage) init(ctx context.Context, secrets v1controller.SecretController) {
-	s.secrets = secrets
+func (s *storage) init(secrets v1controller.SecretController) {
+	s.Lock()
+	defer s.Unlock()
 
-	// Watch just the target secret, instead of using a wrangler OnChange handler
-	// which watches all secrets in all namespaces. Changes to the secret
-	// will be sent through the workqueue.
-	go func() {
-		fieldSelector := fields.Set{"metadata.name": s.name}.String()
-		lw := &cache.ListWatch{
-			ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
-				options.FieldSelector = fieldSelector
-				return secrets.List(s.namespace, options)
-			},
-			WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
-				options.FieldSelector = fieldSelector
-				return secrets.Watch(s.namespace, options)
-			},
+	secrets.OnChange(s.ctx, "tls-storage", func(key string, secret *v1.Secret) (*v1.Secret, error) {
+		if secret == nil {
+			return nil, nil
 		}
-		_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Secret{})
-
-		defer func() {
-			s.queue.ShutDown()
-			watch.Stop()
-			<-done
-		}()
-
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case ev := <-watch.ResultChan():
-				if secret, ok := ev.Object.(*v1.Secret); ok {
-					s.queuedSecret = secret
-					s.queue.Add(secret.Name)
-				}
+		if secret.Namespace == s.namespace && secret.Name == s.name {
+			if err := s.Update(secret); err != nil {
+				return nil, err
 			}
 		}
-	}()
-
-	// enqueue initial sync of the backing secret
-	s.queuedSecret, _ = s.Get()
-	s.queue.Add(s.name)
+		return secret, nil
+	})
+	s.secrets = secrets
+
+	// Asynchronously sync the backing storage to the Kubernetes secret, as doing so inline may
+	// block the listener from accepting new connections if the apiserver becomes unavailable
+	// after the Secrets controller has been initialized. We're not passing around any contexts
+	// here, nor does the controller accept any, so there's no good way to soft-fail with a
+	// reasonable timeout.
+	go s.syncStorage()
 }
 
-// runQueue starts a goroutine to process secrets updates from the workqueue
-func (s *storage) runQueue() {
-	go func() {
-		for s.processQueue() {
-		}
-	}()
+func (s *storage) syncStorage() {
+	var updateStorage bool
+	secret, err := s.Get()
+	if err == nil && cert.IsValidTLSSecret(secret) {
+		// local storage had a cached secret, ensure that it exists in Kubernetes
+		_, err := s.secrets.Create(&v1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        s.name,
+				Namespace:   s.namespace,
+				Annotations: secret.Annotations,
+			},
+			Type: v1.SecretTypeTLS,
+			Data: secret.Data,
+		})
+		if err != nil && !errors.IsAlreadyExists(err) {
+			logrus.Warnf("Failed to create Kubernetes secret: %v", err)
+		}
+	} else {
+		// local storage was empty, try to populate it
+		secret, err = s.secrets.Get(s.namespace, s.name, metav1.GetOptions{})
+		if err != nil {
+			if !errors.IsNotFound(err) {
+				logrus.Warnf("Failed to init Kubernetes secret: %v", err)
+			}
+		} else {
+			updateStorage = true
+		}
+	}
+
+	s.Lock()
+	defer s.Unlock()
+	s.initialized = true
+	if updateStorage {
+		if err := s.storage.Update(secret); err != nil {
+			logrus.Warnf("Failed to init backing storage secret: %v", err)
+		}
+	}
 }
 
-// processQueue processes the secret update queue.
-// The key doesn't actually matter, as we are only handling a single secret with a single worker.
-func (s *storage) processQueue() bool {
-	key, shutdown := s.queue.Get()
-	if shutdown {
-		return false
-	}
+func (s *storage) Get() (*v1.Secret, error) {
+	s.RLock()
+	defer s.RUnlock()
 
-	defer s.queue.Done(key)
-	if err := s.update(); err != nil {
-		logrus.Errorf("Failed to update Secret %s/%s: %v", s.namespace, s.name, err)
-	}
-
-	return true
+	return s.storage.Get()
 }
 
 func (s *storage) targetSecret() (*v1.Secret, error) {
+	s.RLock()
+	defer s.RUnlock()
+
 	existingSecret, err := s.secrets.Get(s.namespace, s.name, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
 		return &v1.Secret{
```
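The replacement init on the `-` side watches only the target secret by combining a field-selector ListWatch with NewIndexerInformerWatcher from k8s.io/client-go/tools/watch, instead of a wrangler OnChange handler that sees every secret in every namespace. A condensed sketch of the same pattern against a plain clientset (the clientset wiring is illustrative; the repository goes through wrangler's SecretController):

```go
package watchdemo

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	toolswatch "k8s.io/client-go/tools/watch"
)

// watchOneSecret delivers events for exactly one secret by name; the field
// selector makes the apiserver filter server-side.
func watchOneSecret(ctx context.Context, client kubernetes.Interface, namespace, name string) {
	fieldSelector := fields.Set{"metadata.name": name}.String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Secrets(namespace).List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Secrets(namespace).Watch(ctx, options)
		},
	}

	// Unlike a raw Watch, the informer-backed watcher re-lists and re-watches on errors.
	_, _, w, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Secret{})
	defer func() {
		w.Stop()
		<-done // wait for the informer goroutine to exit
	}()

	for {
		select {
		case <-ctx.Done():
			return
		case ev := <-w.ResultChan():
			if secret, ok := ev.Object.(*v1.Secret); ok {
				fmt.Println("secret event:", ev.Type, secret.Namespace+"/"+secret.Name)
			}
		}
	}
}
```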
```diff
@@ -170,16 +162,22 @@ func (s *storage) targetSecret() (*v1.Secret, error) {
 	return existingSecret, err
 }
 
 // saveInK8s handles merging the provided secret with the kubernetes secret.
 // This includes calling the tls factory to sign a new certificate with the
 // merged SAN entries, if possible. Note that the provided secret could be
 // either from Kubernetes due to the secret being changed by another client, or
 // from the listener trying to add SANs or regenerate the cert.
 func (s *storage) saveInK8s(secret *v1.Secret) (*v1.Secret, error) {
-	// secret controller not initialized yet, just return the current secret.
-	// if there is an existing secret in Kubernetes, that will get synced by the
-	// list/watch once the controller is initialized.
-	if s.secrets == nil {
+	if !s.initComplete() {
+		// Start a goroutine to attempt to save the secret later, once init is complete.
+		// If this was already handled by initComplete, it should be a no-op, or at worst get
+		// merged with the Kubernetes secret.
+		go s.initSync.Do(func() {
+			if err := wait.Poll(100*time.Millisecond, 15*time.Minute, func() (bool, error) {
+				if !s.initComplete() {
+					return false, nil
+				}
+				_, err := s.saveInK8s(secret)
+				return true, err
+			}); err != nil {
+				logrus.Errorf("Failed to save TLS secret after controller init: %v", err)
+			}
+		})
 		return secret, nil
 	}
```
```diff
@@ -216,38 +214,54 @@
 	return targetSecret, nil
 }
 
-	// Any changes to the cert will change the fingerprint annotation, so we can use that
-	// for change detection, and skip updating an existing secret if it has not changed.
-	changed := !maps.Equal(targetSecret.Annotations, secret.Annotations)
-
-	targetSecret.Type = v1.SecretTypeTLS
 	targetSecret.Annotations = secret.Annotations
+	targetSecret.Type = v1.SecretTypeTLS
 	targetSecret.Data = secret.Data
 
 	if targetSecret.UID == "" {
 		logrus.Infof("Creating new TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
 		return s.secrets.Create(targetSecret)
-	} else if changed {
-		logrus.Infof("Updating TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
-		return s.secrets.Update(targetSecret)
 	}
-	return targetSecret, nil
+	logrus.Infof("Updating TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
+	return s.secrets.Update(targetSecret)
 }
 
+func (s *storage) Update(secret *v1.Secret) error {
+	// Asynchronously update the Kubernetes secret, as doing so inline may block the listener from
+	// accepting new connections if the apiserver becomes unavailable after the Secrets controller
+	// has been initialized. We're not passing around any contexts here, nor does the controller
+	// accept any, so there's no good way to soft-fail with a reasonable timeout.
+	go func() {
+		if err := s.update(secret); err != nil {
+			logrus.Errorf("Failed to save TLS secret for %s/%s: %v", secret.Namespace, secret.Name, err)
+		}
+	}()
+	return nil
+}
+
 func isConflictOrAlreadyExists(err error) bool {
 	return errors.IsConflict(err) || errors.IsAlreadyExists(err)
 }
 
 // update wraps a conflict retry around saveInK8s, which handles merging the
 // queued secret with the Kubernetes secret. Only after successfully
 // updating the Kubernetes secret will the backing storage be updated.
-func (s *storage) update() (err error) {
+func (s *storage) update(secret *v1.Secret) (err error) {
 	var newSecret *v1.Secret
-	if err := retry.OnError(retry.DefaultRetry, isConflictOrAlreadyExists, func() error {
-		newSecret, err = s.saveInK8s(s.queuedSecret)
+	err = retry.OnError(retry.DefaultRetry, isConflictOrAlreadyExists, func() error {
+		newSecret, err = s.saveInK8s(secret)
 		return err
-	}); err != nil {
+	})
+
+	if err != nil {
 		return err
 	}
 
+	// Only hold the lock while updating underlying storage
+	s.Lock()
+	defer s.Unlock()
 	return s.storage.Update(newSecret)
 }
+
+func (s *storage) initComplete() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return s.initialized
+}
```
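Both sides funnel writes through retry.OnError with a custom predicate, so Conflict and AlreadyExists responses from the apiserver are retried with the default backoff while any other error fails fast. A self-contained sketch of the same pattern; the simulated conflict is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func isConflictOrAlreadyExists(err error) bool {
	return errors.IsConflict(err) || errors.IsAlreadyExists(err)
}

func main() {
	gr := schema.GroupResource{Resource: "secrets"}
	attempts := 0

	// retry.DefaultRetry allows up to five attempts roughly 10ms apart;
	// OnError re-runs the function only while the predicate returns true.
	err := retry.OnError(retry.DefaultRetry, isConflictOrAlreadyExists, func() error {
		attempts++
		if attempts < 3 {
			// Simulate two optimistic-concurrency failures before success.
			return errors.NewConflict(gr, "serving-cert", fmt.Errorf("object was modified"))
		}
		return nil
	})

	fmt.Printf("succeeded after %d attempts, err=%v\n", attempts, err)
}
```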