print instruction in case of failed kubectl 1.3 rolling-update against 1.4 cluster
parent 0877816ec8
commit c4ea205aeb

CHANGELOG.md: 18 lines changed
@@ -11,6 +11,9 @@
 - [v1.4.0-beta.3](#v140-beta3)
   - [Downloads](#downloads-2)
   - [Changelog since v1.4.0-beta.2](#changelog-since-v140-beta2)
+    - [Behavior changes caused by enabling the garbage collector](#behavior-changes-caused-by-enabling-the-garbage-collector)
+      - [kubectl rolling-update](#kubectl-rolling-update)
+      - [kubectl delete](#kubectl-delete)
 - [v1.4.0-beta.2](#v140-beta2)
   - [Downloads](#downloads-3)
   - [Changelog since v1.4.0-beta.1](#changelog-since-v140-beta1)
@@ -216,6 +219,21 @@ binary | sha256 hash
 
 **No notable changes for this release**
 
+## Behavior changes caused by enabling the garbage collector
+
+### kubectl rolling-update
+
+An old version of kubectl's rolling-update command is compatible with Kubernetes 1.4 and higher **only if** you specify a new replication controller name. You will need to update to kubectl 1.4 or higher to use the rolling-update command against a 1.4 cluster if you want to keep the original name, or you'll have to do two rolling updates.
+
+If you do happen to run an old version of kubectl's rolling update against a 1.4 cluster, it will fail, usually with an error message that will direct you here. If you saw that error, then don't worry, the operation succeeded except for the part where the new replication controller is renamed back to the old name. You can just do another rolling update using kubectl 1.4 or higher to change the name back: look for a replication controller that has the original name plus a random suffix.
+
+Unfortunately, there is a much rarer second possible failure mode: the replication controller gets renamed to the old name, but there is a duplicated set of pods in the cluster. kubectl will not report an error since it thinks its job is done.
+
+If this happens to you, you can wait at most 10 minutes for the replication controller to start a resync; the extra pods will then be deleted. Or, you can manually trigger a resync by changing the replicas in the spec of the replication controller.
+
+### kubectl delete
+
+If you use an old version of kubectl to delete a replication controller or replicaset, then after the delete command has returned, the replication controller or the replicaset will continue to exist in the key-value store for a short period of time (<1s). You probably will not notice any difference if you use kubectl manually, but you might notice it if you are using kubectl in a script (a minimal wait-loop sketch follows this hunk).
+
 
 # v1.4.0-beta.2
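Regarding the "kubectl delete" note above: the delete call returning is not quite the same as the key being removed from storage, so a script that immediately recreates the object can race with it. A minimal, generic wait-loop sketch in Go (`objectExists` is a hypothetical placeholder for the actual API lookup, not a real client call):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// objectExists is a hypothetical placeholder for an API lookup, e.g. a GET
// on the replication controller or replicaset that was just deleted.
func objectExists() bool { return false }

// waitGone polls until the object is gone or the timeout expires.
func waitGone(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if !objectExists() {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return errors.New("object still present after timeout")
}

func main() {
	if err := waitGone(5 * time.Second); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("object fully deleted")
}
```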
@@ -59,6 +59,9 @@ const (
 
 	// uidKey is the context key for the uid to assign to an object on create.
 	uidKey
+
+	// userAgentKey is the context key for the request user agent.
+	userAgentKey
 )
 
 // NewContext instantiates a base context object for request flows.
@@ -136,3 +139,14 @@ func UIDFrom(ctx Context) (types.UID, bool) {
 	uid, ok := ctx.Value(uidKey).(types.UID)
 	return uid, ok
 }
+
+// WithUserAgent returns a copy of parent in which the user agent value is set
+func WithUserAgent(parent Context, userAgent string) Context {
+	return WithValue(parent, userAgentKey, userAgent)
+}
+
+// UserAgentFrom returns the value of the userAgent key on the ctx
+func UserAgentFrom(ctx Context) (string, bool) {
+	userAgent, ok := ctx.Value(userAgentKey).(string)
+	return userAgent, ok
+}
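Not part of the commit, but as a quick usage sketch of the helpers added above, assuming this tree's k8s.io/kubernetes/pkg/api package:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	// Attach the client identity to a fresh request context...
	ctx := api.WithUserAgent(api.NewContext(), "kubectl-1.3/v1.3.8 (linux/amd64) kubernetes/e328d5b")

	// ...and read it back further down the call chain.
	if userAgent, ok := api.UserAgentFrom(ctx); ok {
		fmt.Println(userAgent)
	}
}
```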
@@ -320,12 +320,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 	var ctxFn ContextFunc
 	ctxFn = func(req *restful.Request) api.Context {
 		if context == nil {
-			return api.NewContext()
+			return api.WithUserAgent(api.NewContext(), req.HeaderParameter("User-Agent"))
 		}
 		if ctx, ok := context.Get(req.Request); ok {
-			return ctx
+			return api.WithUserAgent(ctx, req.HeaderParameter("User-Agent"))
 		}
-		return api.NewContext()
+		return api.WithUserAgent(api.NewContext(), req.HeaderParameter("User-Agent"))
 	}
 
 	allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list.
@@ -37,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/storage"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/util/validation/field"
+	"k8s.io/kubernetes/pkg/version"
 	"k8s.io/kubernetes/pkg/watch"
 
 	"github.com/golang/glog"
@@ -218,6 +219,25 @@ func (e *Store) ListPredicate(ctx api.Context, m *generic.SelectionPredicate, op
 	return list, storeerr.InterpretListError(err, e.QualifiedResource)
 }
 
+// TODO: remove this function after 1.6
+// returns if the user agent is kubectl older than v1.4.0
+func isOldKubectl(userAgent string) bool {
+	// example userAgent string: kubectl-1.3/v1.3.8 (linux/amd64) kubernetes/e328d5b
+	if !strings.Contains(userAgent, "kubectl") {
+		return false
+	}
+	userAgent = strings.Split(userAgent, " ")[0]
+	subs := strings.Split(userAgent, "/")
+	if len(subs) != 2 {
+		return false
+	}
+	kubectlVersion, versionErr := version.Parse(subs[1])
+	if versionErr != nil {
+		return false
+	}
+	return kubectlVersion.LT(version.MustParse("v1.4.0"))
+}
+
 // Create inserts a new item according to the unique key from the object.
 func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
 	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
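As a side note, a standalone sketch (not from this commit, standard library only) of how the User-Agent parsing above handles the example string from the comment; the version comparison itself is left out:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example User-Agent sent by an old kubectl, as noted in the comment above.
	userAgent := "kubectl-1.3/v1.3.8 (linux/amd64) kubernetes/e328d5b"

	// First token before the space: "kubectl-1.3/v1.3.8".
	first := strings.Split(userAgent, " ")[0]

	// Split on "/" to separate the client name from its version.
	subs := strings.Split(first, "/")
	fmt.Println(subs[0]) // kubectl-1.3
	fmt.Println(subs[1]) // v1.3.8, which is what gets compared against v1.4.0
}
```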
@@ -252,6 +272,15 @@ func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
 		if accessor.GetDeletionTimestamp() != nil {
 			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
 			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
+			// TODO: remove this block after 1.6
+			userAgent, _ := api.UserAgentFrom(ctx)
+			if !isOldKubectl(userAgent) {
+				return nil, err
+			}
+			if e.QualifiedResource.Resource != "replicationcontrollers" {
+				return nil, err
+			}
+			*msg = fmt.Sprintf("Note: if you are using \"kubectl rolling-update\" and your kubectl version is older than v1.4.0, your rolling-update has probably failed, though the pods are correctly updated. Please see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#kubectl-rolling-update for a workaround. : %s", *msg)
 		}
 		return nil, err
 	}