Compare commits

...

9 Commits

Author SHA1 Message Date
Mary
63a9f346f9 setting TLSError message as debug (#197) 2025-09-09 11:14:52 -07:00
Siva Kanakala
2fb4ae1e2e fix dynamic-cert.json permission (#196) 2025-09-09 09:44:31 +05:30
Brad Davidson
b12f85e82a Sync changes to Kubernetes secret through workqueue instead of goroutines with locks (#202)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
2025-09-08 13:52:35 -07:00
renovate-rancher[bot]
5bcd922919 Update actions/checkout action to v4.3.0 (#190)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2025-08-12 08:27:09 -04:00
Chad Roberts
2d665ea308 Add CODEOWNERS (#189) 2025-08-07 10:58:07 -07:00
renovate-rancher[bot]
8fd666b26c Migrate config .github/renovate.json (#167)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2025-06-24 11:12:58 -04:00
renovate-rancher[bot]
6a93e7e127 Update actions/setup-go action to v5.5.0 (#177)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2025-06-24 11:12:32 -04:00
renovate-rancher[bot]
db1147364d Update module github.com/rancher/wrangler/v3 to v3.2.2-rc.3 (#183)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2025-06-24 11:11:22 -04:00
Chad Roberts
6144f3d8db Update VERSION.md for v0.7 (#181) 2025-06-11 15:06:36 -04:00
12 changed files with 238 additions and 174 deletions

.github/renovate.json (vendored, 28 changed lines)

@@ -3,24 +3,30 @@
"github>rancher/renovate-config#release"
],
"baseBranches": [
"main", "release/v0.3", "release/v0.4", "release/v0.5"
"main",
"release/v0.3",
"release/v0.4",
"release/v0.5"
],
"prHourlyLimit": 2,
"packageRules": [
{
"matchPackagePatterns": [
"k8s.io/*",
"sigs.k8s.io/*",
"github.com/prometheus/*"
],
"enabled": false
"enabled": false,
"matchPackageNames": [
"/k8s.io/*/",
"/sigs.k8s.io/*/",
"/github.com/prometheus/*/"
]
},
{
"matchPackagePatterns": [
"github.com/rancher/wrangler/*"
"matchUpdateTypes": [
"major",
"minor"
],
"matchUpdateTypes": ["major", "minor"],
"enabled": false
"enabled": false,
"matchPackageNames": [
"/github.com/rancher/wrangler/*/"
]
}
]
}

View File

@@ -11,10 +11,10 @@ jobs:
steps:
- name: Checkout code
# https://github.com/actions/checkout/releases/tag/VERSION
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Install Go
# https://github.com/actions/setup-go/releases/tag/VERSION
-        uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version-file: 'go.mod'
- run: go test -v -race -cover ./...

View File

@@ -12,7 +12,7 @@ jobs:
contents: write
steps:
- name : Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Create release on Github
run: |

CODEOWNERS (new file, 1 changed line)

@@ -0,0 +1 @@
* @rancher/rancher-squad-frameworks

View File

@@ -3,8 +3,9 @@ DynamicListener follows a pre-release (v0.x) strategy of semver. There is limite
The current supported release lines are:
| DynamicListener Branch | DynamicListener Minor version | Kubernetes Version Range | Wrangler Version |
-|--------------------------|------------------------------------|------------------------------------------------|------------------------------------------------|
-| main | v0.6 | v1.27+ | v3 |
+|------------------------|-------------------------------|--------------------------|------------------------------------------------|
+| main | v0.7 | v1.27+ | v3 |
+| release/v0.6 | v0.6 | v1.27 - v1.32 | v3 |
| release/v0.5 | v0.5 | v1.26 - v1.30 | v3 |
| release/v0.4 | v0.4 | v1.25 - v1.28 | v2 |
| release/v0.3 | v0.3 | v1.23 - v1.27 | v2 |

View File

@@ -195,6 +195,10 @@ func (t *TLS) generateCert(secret *v1.Secret, cn ...string) (*v1.Secret, bool, e
 }
 
 func (t *TLS) IsExpired(secret *v1.Secret) bool {
+	if secret == nil {
+		return false
+	}
+
 	certsPem := secret.Data[v1.TLSCertKey]
 	if len(certsPem) == 0 {
 		return false
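
The hunk above only adds a nil-secret guard; the rest of IsExpired is cut off by the diff context. As a rough illustration of what an expiry check of this shape typically does (decode the PEM bundle, parse the leaf certificate, compare NotAfter against the clock), here is a hedged sketch; the package and function names are placeholders, not dynamiclistener's actual implementation.

```go
// Placeholder package and function names; a hedged sketch, not the repository's code.
package certcheck

import (
	"crypto/x509"
	"encoding/pem"
	"time"
)

// leafExpired reports whether the first certificate in a PEM bundle (as stored under
// the secret's tls.crt key) has passed its NotAfter date. Unparseable input is treated
// as "not expired", matching the conservative early returns in the hunk above.
func leafExpired(certsPem []byte) bool {
	block, _ := pem.Decode(certsPem)
	if block == nil {
		return false
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false
	}
	return time.Now().After(cert.NotAfter)
}
```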

go.mod (4 changed lines)

@@ -5,7 +5,7 @@ go 1.24.0
toolchain go1.24.3
require (
-	github.com/rancher/wrangler/v3 v3.2.2-rc.1
+	github.com/rancher/wrangler/v3 v3.2.2-rc.3
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.36.0
@@ -41,7 +41,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/rancher/lasso v0.2.3-rc1 // indirect
+	github.com/rancher/lasso v0.2.3-rc3 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect

go.sum (12 changed lines)

@@ -72,10 +72,10 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rancher/lasso v0.2.3-rc1 h1:dRerAaAiziQKSVasSQe3Av2aNGokjidzZD/bLLLIQT4=
-github.com/rancher/lasso v0.2.3-rc1/go.mod h1:L8ct0T/HAYTWLKWPBQCZvluqmr72Yl5YOewjgRRvnMk=
-github.com/rancher/wrangler/v3 v3.2.2-rc.1 h1:UoReGk+6sZD9uVoCpwdFWrznhVs4jKDsZ+fNNMJ0EQE=
-github.com/rancher/wrangler/v3 v3.2.2-rc.1/go.mod h1:G+gawqOKIo7i1jtgPKvzU3RzA+3OCaAsYVxq03Iya20=
+github.com/rancher/lasso v0.2.3-rc3 h1:kkYnARdFeY6A9E2XnjfQbG8CssHQwobPMIFqPRGpVxc=
+github.com/rancher/lasso v0.2.3-rc3/go.mod h1:G+KeeOaKRjp+qGp0bV6VbLhYrq1vHbJPbDh40ejg5yE=
+github.com/rancher/wrangler/v3 v3.2.2-rc.3 h1:ObcqAxQkQFP6r1YI3zJhi9v9PE+UUNNZpelz6NSpQnc=
+github.com/rancher/wrangler/v3 v3.2.2-rc.3/go.mod h1:ukbwLYT+MTCx+43aXNQNYxZizQpeo0gILK05k4RoW7o=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -95,8 +95,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
-go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
+go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

View File

@@ -1,11 +1,13 @@
 package server
 
 import (
+	"bytes"
 	"context"
 	"crypto"
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
+	"io"
 	"log"
 	"net"
 	"net/http"
@@ -40,7 +42,24 @@ type ListenOpts struct {
 	// Override legacy behavior where server logs written to the application's logrus object
 	// were dropped unless logrus was set to debug-level (such as by launching steve with '--debug').
 	// Setting this to true results in server logs appearing at an ERROR level.
-	DisplayServerLogs bool
+	DisplayServerLogs       bool
+	IgnoreTLSHandshakeError bool
 }
 
+var TLSHandshakeError = []byte("http: TLS handshake error")
+
+var _ io.Writer = &TLSErrorDebugger{}
+
+type TLSErrorDebugger struct{}
+
+func (t *TLSErrorDebugger) Write(p []byte) (n int, err error) {
+	p = bytes.TrimSpace(p)
+	if bytes.HasPrefix(p, TLSHandshakeError) {
+		logrus.Debug(string(p))
+	} else {
+		logrus.Error(string(p))
+	}
+	return len(p), err
+}
+
 func ListenAndServe(ctx context.Context, httpsPort, httpPort int, handler http.Handler, opts *ListenOpts) error {
@@ -52,9 +71,15 @@ func ListenAndServe(ctx context.Context, httpsPort, httpPort int, handler http.H
 	if opts.DisplayServerLogs {
 		writer = logger.WriterLevel(logrus.ErrorLevel)
 	}
 
-	// Otherwise preserve legacy behaviour of displaying server logs only in debug mode.
-	errorLog := log.New(writer, "", log.LstdFlags)
+	var errorLog *log.Logger
+	if opts.IgnoreTLSHandshakeError {
+		debugWriter := &TLSErrorDebugger{}
+		errorLog = log.New(debugWriter, "", 0)
+	} else {
+		// Otherwise preserve legacy behaviour of displaying server logs only in debug mode.
+		errorLog = log.New(writer, "", 0)
+	}
 
 	if opts.TLSListenerConfig.TLSConfig == nil {
 		opts.TLSListenerConfig.TLSConfig = &tls.Config{}
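
For context on how the new TLSErrorDebugger is used: the hunk hands it to log.New as the server's error logger, and net/http writes lines beginning with "http: TLS handshake error" to that logger. The standalone sketch below shows the same pattern against a plain http.Server; the listen address and certificate paths are placeholders, and this is illustrative wiring rather than the library's own ListenAndServe.

```go
// Standalone sketch: placeholder listen address and certificate paths; the writer type
// mirrors the TLSErrorDebugger added in the hunk above.
package main

import (
	"bytes"
	"log"
	"net/http"

	"github.com/sirupsen/logrus"
)

var tlsHandshakeError = []byte("http: TLS handshake error")

// debugTLSWriter demotes TLS handshake noise to debug level and keeps everything
// else the http.Server reports at error level.
type debugTLSWriter struct{}

func (debugTLSWriter) Write(p []byte) (int, error) {
	msg := bytes.TrimSpace(p)
	if bytes.HasPrefix(msg, tlsHandshakeError) {
		logrus.Debug(string(msg))
	} else {
		logrus.Error(string(msg))
	}
	return len(p), nil
}

func main() {
	srv := &http.Server{
		Addr: ":8443",
		// Same pattern as the diff: hand the custom writer to log.New with no flags,
		// so the raw message reaches the writer and the prefix check can match.
		ErrorLog: log.New(debugTLSWriter{}, "", 0),
	}
	logrus.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}
```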

View File

@@ -39,6 +39,47 @@ func (s *safeWriter) Write(p []byte) (n int, err error) {
return s.writer.Write(p)
}
func TestTLSHandshakeErrorWriter(t *testing.T) {
tests := []struct {
name string
ignoreTLSHandshakeError bool
message []byte
expectedLevel logrus.Level
}{
{
name: "TLS handshake error is logged as debug",
message: []byte("http: TLS handshake error: EOF"),
expectedLevel: logrus.DebugLevel,
},
{
name: "other errors are logged as error",
message: []byte("some other server error"),
expectedLevel: logrus.ErrorLevel,
},
}
var baseLogLevel = logrus.GetLevel()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert := assertPkg.New(t)
var buf bytes.Buffer
logrus.SetOutput(&buf)
logrus.SetLevel(logrus.DebugLevel)
debugger := &TLSErrorDebugger{}
n, err := debugger.Write(tt.message)
assert.Nil(err)
assert.Equal(len(tt.message), n)
logOutput := buf.String()
assert.Contains(logOutput, "level="+tt.expectedLevel.String())
assert.Contains(logOutput, string(tt.message))
})
}
logrus.SetLevel(baseLogLevel)
}
func TestHttpServerLogWithLogrus(t *testing.T) {
assert := assertPkg.New(t)
message := "debug-level writer"
@@ -84,7 +125,7 @@ func doRequest(safeWriter *safeWriter, message string, logLevel logrus.Level) er
msg := fmt.Sprintf("panicking context: %s", message)
handler := alwaysPanicHandler{msg: msg}
listenOpts := &ListenOpts{
BindHost: host,
BindHost: host,
DisplayServerLogs: logLevel == logrus.ErrorLevel,
}

View File

@@ -32,7 +32,7 @@ func (s *storage) Get() (*v1.Secret, error) {
}
func (s *storage) Update(secret *v1.Secret) error {
-	f, err := os.Create(s.file)
+	f, err := os.OpenFile(s.file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
return err
}
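
A short aside on why the one-line change above matters: os.Create is shorthand for OpenFile with mode 0666 (before umask), so a freshly written dynamic-cert.json could end up readable by other users, while os.OpenFile with 0600 keeps a newly created file owner-only. The snippet below is illustrative only, with throwaway file names; it is not part of the repository.

```go
// Throwaway file names; illustrative only.
package main

import (
	"fmt"
	"os"
)

func main() {
	// Before the fix: os.Create == os.OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666),
	// so the serialized certificate and key would be world-readable (minus umask).
	loose, err := os.Create("cert-loose.json")
	if err != nil {
		panic(err)
	}
	loose.Close()

	// After the fix: the file is created owner read/write only. Note the mode only applies
	// when the file is first created; an existing file keeps its old permissions.
	tight, err := os.OpenFile("cert-0600.json", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		panic(err)
	}
	tight.Close()

	for _, name := range []string{"cert-loose.json", "cert-0600.json"} {
		if info, err := os.Stat(name); err == nil {
			fmt.Printf("%s: %v\n", name, info.Mode().Perm())
		}
	}
}
```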

View File

@@ -2,33 +2,47 @@ package kubernetes
 import (
 	"context"
-	"sync"
+	"maps"
 	"time"
 
 	"github.com/rancher/dynamiclistener"
 	"github.com/rancher/dynamiclistener/cert"
 	"github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
 	v1controller "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
-	"github.com/rancher/wrangler/v3/pkg/start"
 	"github.com/sirupsen/logrus"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/cache"
+	toolswatch "k8s.io/client-go/tools/watch"
 	"k8s.io/client-go/util/retry"
+	"k8s.io/client-go/util/workqueue"
 )
 
 type CoreGetter func() *core.Factory
 
+type storage struct {
+	namespace, name string
+	storage         dynamiclistener.TLSStorage
+	secrets         v1controller.SecretController
+	tls             dynamiclistener.TLSFactory
+	queue           workqueue.TypedInterface[string]
+	queuedSecret    *v1.Secret
+}
+
 func Load(ctx context.Context, secrets v1controller.SecretController, namespace, name string, backing dynamiclistener.TLSStorage) dynamiclistener.TLSStorage {
 	storage := &storage{
 		name:      name,
 		namespace: namespace,
 		storage:   backing,
-		ctx:       ctx,
-		initSync:  &sync.Once{},
+		queue:     workqueue.NewTyped[string](),
 	}
-	storage.init(secrets)
+	storage.runQueue()
+	storage.init(ctx, secrets)
 	return storage
 }
@@ -37,16 +51,16 @@ func New(ctx context.Context, core CoreGetter, namespace, name string, backing d
 		name:      name,
 		namespace: namespace,
 		storage:   backing,
-		ctx:       ctx,
-		initSync:  &sync.Once{},
+		queue:     workqueue.NewTyped[string](),
 	}
 
+	storage.runQueue()
 	// lazy init
 	go func() {
 		wait.PollImmediateUntilWithContext(ctx, time.Second, func(cxt context.Context) (bool, error) {
 			if coreFactory := core(); coreFactory != nil {
-				storage.init(coreFactory.Core().V1().Secret())
-				return true, start.All(ctx, 5, coreFactory)
+				storage.init(ctx, coreFactory.Core().V1().Secret())
+				return true, nil
 			}
 			return false, nil
 		})
@@ -55,100 +69,94 @@ func New(ctx context.Context, core CoreGetter, namespace, name string, backing d
return storage
}
type storage struct {
sync.RWMutex
namespace, name string
storage dynamiclistener.TLSStorage
secrets v1controller.SecretController
ctx context.Context
tls dynamiclistener.TLSFactory
initialized bool
initSync *sync.Once
}
func (s *storage) SetFactory(tls dynamiclistener.TLSFactory) {
s.Lock()
defer s.Unlock()
s.tls = tls
}
func (s *storage) init(secrets v1controller.SecretController) {
s.Lock()
defer s.Unlock()
secrets.OnChange(s.ctx, "tls-storage", func(key string, secret *v1.Secret) (*v1.Secret, error) {
if secret == nil {
return nil, nil
}
if secret.Namespace == s.namespace && secret.Name == s.name {
if err := s.Update(secret); err != nil {
return nil, err
}
}
return secret, nil
})
s.secrets = secrets
// Asynchronously sync the backing storage to the Kubernetes secret, as doing so inline may
// block the listener from accepting new connections if the apiserver becomes unavailable
// after the Secrets controller has been initialized. We're not passing around any contexts
// here, nor does the controller accept any, so there's no good way to soft-fail with a
// reasonable timeout.
go s.syncStorage()
}
func (s *storage) syncStorage() {
var updateStorage bool
secret, err := s.Get()
if err == nil && cert.IsValidTLSSecret(secret) {
// local storage had a cached secret, ensure that it exists in Kubernetes
_, err := s.secrets.Create(&v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: s.name,
Namespace: s.namespace,
Annotations: secret.Annotations,
},
Type: v1.SecretTypeTLS,
Data: secret.Data,
})
if err != nil && !errors.IsAlreadyExists(err) {
logrus.Warnf("Failed to create Kubernetes secret: %v", err)
}
} else {
// local storage was empty, try to populate it
secret, err = s.secrets.Get(s.namespace, s.name, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
logrus.Warnf("Failed to init Kubernetes secret: %v", err)
}
} else {
updateStorage = true
}
}
s.Lock()
defer s.Unlock()
s.initialized = true
if updateStorage {
if err := s.storage.Update(secret); err != nil {
logrus.Warnf("Failed to init backing storage secret: %v", err)
}
}
}
// always return secret from backing storage
func (s *storage) Get() (*v1.Secret, error) {
s.RLock()
defer s.RUnlock()
return s.storage.Get()
}
func (s *storage) targetSecret() (*v1.Secret, error) {
s.RLock()
defer s.RUnlock()
// sync secret to Kubernetes and backing storage via workqueue
func (s *storage) Update(secret *v1.Secret) error {
// Asynchronously update the Kubernetes secret, as doing so inline may block the listener from
// accepting new connections if the apiserver becomes unavailable after the Secrets controller
// has been initialized.
s.queuedSecret = secret
s.queue.Add(s.name)
return nil
}
func (s *storage) SetFactory(tls dynamiclistener.TLSFactory) {
s.tls = tls
}
func (s *storage) init(ctx context.Context, secrets v1controller.SecretController) {
s.secrets = secrets
// Watch just the target secret, instead of using a wrangler OnChange handler
// which watches all secrets in all namespaces. Changes to the secret
// will be sent through the workqueue.
go func() {
fieldSelector := fields.Set{"metadata.name": s.name}.String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return secrets.List(s.namespace, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return secrets.Watch(s.namespace, options)
},
}
_, _, watch, done := toolswatch.NewIndexerInformerWatcher(lw, &v1.Secret{})
defer func() {
s.queue.ShutDown()
watch.Stop()
<-done
}()
for {
select {
case <-ctx.Done():
return
case ev := <-watch.ResultChan():
if secret, ok := ev.Object.(*v1.Secret); ok {
s.queuedSecret = secret
s.queue.Add(secret.Name)
}
}
}
}()
// enqueue initial sync of the backing secret
s.queuedSecret, _ = s.Get()
s.queue.Add(s.name)
}
// runQueue starts a goroutine to process secrets updates from the workqueue
func (s *storage) runQueue() {
go func() {
for s.processQueue() {
}
}()
}
// processQueue processes the secret update queue.
// The key doesn't actually matter, as we are only handling a single secret with a single worker.
func (s *storage) processQueue() bool {
key, shutdown := s.queue.Get()
if shutdown {
return false
}
defer s.queue.Done(key)
if err := s.update(); err != nil {
logrus.Errorf("Failed to update Secret %s/%s: %v", s.namespace, s.name, err)
}
return true
}
func (s *storage) targetSecret() (*v1.Secret, error) {
existingSecret, err := s.secrets.Get(s.namespace, s.name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return &v1.Secret{
@@ -162,22 +170,16 @@ func (s *storage) targetSecret() (*v1.Secret, error) {
return existingSecret, err
}
// saveInK8s handles merging the provided secret with the kubernetes secret.
// This includes calling the tls factory to sign a new certificate with the
// merged SAN entries, if possible. Note that the provided secret could be
// either from Kubernetes due to the secret being changed by another client, or
// from the listener trying to add SANs or regenerate the cert.
func (s *storage) saveInK8s(secret *v1.Secret) (*v1.Secret, error) {
if !s.initComplete() {
// Start a goroutine to attempt to save the secret later, once init is complete.
// If this was already handled by initComplete, it should be a no-op, or at worst get
// merged with the Kubernetes secret.
go s.initSync.Do(func() {
if err := wait.Poll(100*time.Millisecond, 15*time.Minute, func() (bool, error) {
if !s.initComplete() {
return false, nil
}
_, err := s.saveInK8s(secret)
return true, err
}); err != nil {
logrus.Errorf("Failed to save TLS secret after controller init: %v", err)
}
})
// secret controller not initialized yet, just return the current secret.
// if there is an existing secret in Kubernetes, that will get synced by the
// list/watch once the controller is initialized.
if s.secrets == nil {
return secret, nil
}
@@ -214,54 +216,38 @@ func (s *storage) saveInK8s(secret *v1.Secret) (*v1.Secret, error) {
 		return targetSecret, nil
 	}
 
-	targetSecret.Annotations = secret.Annotations
+	// Any changes to the cert will change the fingerprint annotation, so we can use that
+	// for change detection, and skip updating an existing secret if it has not changed.
+	changed := !maps.Equal(targetSecret.Annotations, secret.Annotations)
+
 	targetSecret.Type = v1.SecretTypeTLS
+	targetSecret.Annotations = secret.Annotations
 	targetSecret.Data = secret.Data
 
 	if targetSecret.UID == "" {
 		logrus.Infof("Creating new TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
 		return s.secrets.Create(targetSecret)
+	} else if changed {
+		logrus.Infof("Updating TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
+		return s.secrets.Update(targetSecret)
 	}
-
-	logrus.Infof("Updating TLS secret for %s/%s (count: %d): %v", targetSecret.Namespace, targetSecret.Name, len(targetSecret.Annotations)-1, targetSecret.Annotations)
-	return s.secrets.Update(targetSecret)
-}
-
-func (s *storage) Update(secret *v1.Secret) error {
-	// Asynchronously update the Kubernetes secret, as doing so inline may block the listener from
-	// accepting new connections if the apiserver becomes unavailable after the Secrets controller
-	// has been initialized. We're not passing around any contexts here, nor does the controller
-	// accept any, so there's no good way to soft-fail with a reasonable timeout.
-	go func() {
-		if err := s.update(secret); err != nil {
-			logrus.Errorf("Failed to save TLS secret for %s/%s: %v", secret.Namespace, secret.Name, err)
-		}
-	}()
-	return nil
+
+	return targetSecret, nil
 }
 
 func isConflictOrAlreadyExists(err error) bool {
 	return errors.IsConflict(err) || errors.IsAlreadyExists(err)
 }
 
-func (s *storage) update(secret *v1.Secret) (err error) {
+// update wraps a conflict retry around saveInK8s, which handles merging the
+// queued secret with the Kubernetes secret. Only after successfully
+// updating the Kubernetes secret will the backing storage be updated.
+func (s *storage) update() (err error) {
 	var newSecret *v1.Secret
-	err = retry.OnError(retry.DefaultRetry, isConflictOrAlreadyExists, func() error {
-		newSecret, err = s.saveInK8s(secret)
+	if err := retry.OnError(retry.DefaultRetry, isConflictOrAlreadyExists, func() error {
+		newSecret, err = s.saveInK8s(s.queuedSecret)
 		return err
-	})
-	if err != nil {
+	}); err != nil {
 		return err
 	}
-
-	// Only hold the lock while updating underlying storage
-	s.Lock()
-	defer s.Unlock()
 	return s.storage.Update(newSecret)
 }
-
-func (s *storage) initComplete() bool {
-	s.RLock()
-	defer s.RUnlock()
-	return s.initialized
-}
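
The kubernetes storage diff above is the heart of #202: the RWMutex, sync.Once, and fire-and-forget goroutines are replaced by a single-worker typed workqueue, so every write to the Kubernetes secret and to the backing storage is serialized. The sketch below shows just that pattern with k8s.io/client-go/util/workqueue; the type name, key, and the body of the sync step are placeholders rather than dynamiclistener's storage implementation.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// syncer serializes all updates through one queue and one worker, mirroring the
// queue field added to the storage struct in the diff.
type syncer struct {
	queue workqueue.TypedInterface[string]
}

// enqueue is what watch events and listener updates would call; in dynamiclistener the
// secret itself is stashed in a field and only its name is used as the key.
func (s *syncer) enqueue(name string) {
	s.queue.Add(name)
}

// runQueue starts the single worker, as storage.runQueue() does in the diff.
func (s *syncer) runQueue() {
	go func() {
		for s.processQueue() {
		}
	}()
}

// processQueue takes one key off the queue and performs the sync; returning false
// stops the worker once the queue is shut down.
func (s *syncer) processQueue() bool {
	key, shutdown := s.queue.Get()
	if shutdown {
		return false
	}
	defer s.queue.Done(key)
	// Here the real code retries saveInK8s and then updates the backing storage.
	fmt.Println("syncing secret", key)
	return true
}

func main() {
	s := &syncer{queue: workqueue.NewTyped[string]()}
	s.runQueue()
	s.enqueue("dynamic-cert")
	s.enqueue("dynamic-cert") // duplicate keys collapse while pending
	time.Sleep(100 * time.Millisecond)
	s.queue.ShutDown()
}
```

Because the workqueue deduplicates a key that is already pending, a burst of watch events or SAN additions should collapse into a single sync pass rather than spawning one goroutine per update.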
}