mirror of https://github.com/rancher/rke.git
synced 2025-09-19 10:26:20 +00:00

Commit: Update vendor
vendor/k8s.io/kubectl/pkg/drain/cordon.go (generated, vendored): 16 changed lines
@@ -17,9 +17,11 @@ limitations under the License.
 package drain
 
 import (
+	"context"
 	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
@@ -70,7 +72,7 @@ func (c *CordonHelper) UpdateIfRequired(desired bool) bool {
 // updating the given node object; it may return error if the object cannot be encoded as
 // JSON, or if either patch or update calls fail; it will also return a second error
 // whenever creating a patch has failed
-func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, error) {
+func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface, serverDryRun bool) (error, error) {
 	client := clientset.CoreV1().Nodes()
 
 	oldData, err := json.Marshal(c.node)
@@ -87,9 +89,17 @@ func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, er
 
 	patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node)
 	if patchErr == nil {
-		_, err = client.Patch(c.node.Name, types.StrategicMergePatchType, patchBytes)
+		patchOptions := metav1.PatchOptions{}
+		if serverDryRun {
+			patchOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		_, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes, patchOptions)
 	} else {
-		_, err = client.Update(c.node)
+		updateOptions := metav1.UpdateOptions{}
+		if serverDryRun {
+			updateOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		_, err = client.Update(context.TODO(), c.node, updateOptions)
 	}
 	return err, patchErr
 }
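
For context, a minimal sketch (not part of the vendored diff) of driving the new PatchOrReplace signature; the client and node name are assumed to come from the caller:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubectl/pkg/drain"
)

// cordonDryRun cordons a node with serverDryRun=true, so the API server
// validates and applies the patch in memory but persists nothing.
func cordonDryRun(client kubernetes.Interface, nodeName string) error {
	node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	c := drain.NewCordonHelper(node)
	if !c.UpdateIfRequired(true) {
		fmt.Printf("node %q already cordoned\n", nodeName)
		return nil
	}
	err, patchErr := c.PatchOrReplace(client, true)
	if patchErr != nil {
		return patchErr
	}
	return err
}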
vendor/k8s.io/kubectl/pkg/drain/default.go (generated, vendored): 2 changed lines
@@ -57,7 +57,7 @@ func RunCordonOrUncordon(drainer *Helper, node *corev1.Node, desired bool) error
 		return nil
 	}
 
-	err, patchErr := c.PatchOrReplace(drainer.Client)
+	err, patchErr := c.PatchOrReplace(drainer.Client, false)
 	if patchErr != nil {
 		return patchErr
 	}
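
The hard-coded false above means callers of RunCordonOrUncordon never request a server-side dry run through this path. A hypothetical wrapper, assuming a populated drain.Helper:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/drain"
)

// cordonNode marks the node unschedulable; desired=true means cordon,
// false would uncordon.
func cordonNode(drainer *drain.Helper, node *corev1.Node) error {
	return drain.RunCordonOrUncordon(drainer, node, true)
}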
vendor/k8s.io/kubectl/pkg/drain/drain.go (generated, vendored): 168 changed lines
@@ -31,7 +31,9 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/client-go/kubernetes"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 )
 
 const (
@@ -39,10 +41,12 @@ const (
 	EvictionKind = "Eviction"
 	// EvictionSubresource represents the kind of evictions object as pod's subresource
 	EvictionSubresource = "pods/eviction"
+	podSkipMsgTemplate  = "pod %q has DeletionTimestamp older than %v seconds, skipping\n"
 )
 
 // Helper contains the parameters to control the behaviour of drainer
 type Helper struct {
+	Ctx    context.Context
 	Client kubernetes.Interface
 	Force  bool
 	GracePeriodSeconds int
@@ -51,16 +55,39 @@ type Helper struct {
 	DeleteLocalData    bool
 	Selector           string
 	PodSelector        string
-	Out                io.Writer
-	ErrOut             io.Writer
 
-	// TODO(justinsb): unnecessary?
-	DryRun bool
+	// DisableEviction forces drain to use delete rather than evict
+	DisableEviction bool
+
+	// SkipWaitForDeleteTimeoutSeconds ignores pods that have a
+	// DeletionTimeStamp > N seconds. It's up to the user to decide when this
+	// option is appropriate; examples include the Node is unready and the pods
+	// won't drain otherwise
+	SkipWaitForDeleteTimeoutSeconds int
+
+	Out    io.Writer
+	ErrOut io.Writer
+
+	DryRunStrategy cmdutil.DryRunStrategy
+	DryRunVerifier *resource.DryRunVerifier
 
 	// OnPodDeletedOrEvicted is called when a pod is evicted/deleted; for printing progress output
 	OnPodDeletedOrEvicted func(pod *corev1.Pod, usingEviction bool)
 }
 
+type waitForDeleteParams struct {
+	ctx                             context.Context
+	pods                            []corev1.Pod
+	interval                        time.Duration
+	timeout                         time.Duration
+	usingEviction                   bool
+	getPodFn                        func(string, string) (*corev1.Pod, error)
+	onDoneFn                        func(pod *corev1.Pod, usingEviction bool)
+	globalTimeout                   time.Duration
+	skipWaitForDeleteTimeoutSeconds int
+	out                             io.Writer
+}
+
 // CheckEvictionSupport uses Discovery API to find out if the server support
 // eviction subresource If support, it will return its groupVersion; Otherwise,
 // it will return an empty string
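
A hedged sketch of populating the reshaped Helper; the client is assumed to be built elsewhere, and DryRunVerifier is only required when DryRunStrategy is cmdutil.DryRunServer:

package main

import (
	"context"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/drain"
)

func newDrainer(ctx context.Context, client kubernetes.Interface) *drain.Helper {
	return &drain.Helper{
		Ctx:                             ctx, // getContext() falls back to context.Background() when nil
		Client:                          client,
		Force:                           true,
		GracePeriodSeconds:              -1, // negative: keep each pod's own grace period
		Timeout:                         120 * time.Second,
		DeleteLocalData:                 true,
		SkipWaitForDeleteTimeoutSeconds: 60, // stop waiting on pods stuck terminating for >60s
		Out:                             os.Stdout,
		ErrOut:                          os.Stderr,
		DryRunStrategy:                  cmdutil.DryRunNone,
	}
}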
@@ -94,22 +121,37 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
 	return "", nil
 }
 
-func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions {
-	deleteOptions := &metav1.DeleteOptions{}
+func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
+	deleteOptions := metav1.DeleteOptions{}
 	if d.GracePeriodSeconds >= 0 {
 		gracePeriodSeconds := int64(d.GracePeriodSeconds)
 		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
 	}
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		deleteOptions.DryRun = []string{metav1.DryRunAll}
+	}
 	return deleteOptions
 }
 
 // DeletePod will delete the given pod, or return an error if it couldn't
 func (d *Helper) DeletePod(pod corev1.Pod) error {
-	return d.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, d.makeDeleteOptions())
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil {
+			return err
+		}
+	}
+	return d.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, d.makeDeleteOptions())
 }
 
 // EvictPod will evict the give pod, or return an error if it couldn't
 func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil {
+			return err
+		}
+	}
+
+	delOpts := d.makeDeleteOptions()
 	eviction := &policyv1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: policyGroupVersion,
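
The dry-run wiring above reduces to a plain DeleteOptions value; a standalone illustration of the shape makeDeleteOptions now returns under server dry run:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	grace := int64(30)
	opts := metav1.DeleteOptions{
		GracePeriodSeconds: &grace,
		// DryRunAll asks the server to run admission and validation
		// without persisting the delete.
		DryRun: []string{metav1.DryRunAll},
	}
	fmt.Printf("%+v\n", opts)
	// Returning a value rather than a pointer is what lets EvictPod embed
	// a copy via DeleteOptions: &delOpts in the hunk above.
}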
@@ -119,10 +161,11 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
-		DeleteOptions: d.makeDeleteOptions(),
+		DeleteOptions: &delOpts,
 	}
 
 	// Remember to change change the URL manipulation func when Eviction's version change
-	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
+	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(context.TODO(), eviction)
 }
 
 // GetPodsForDeletion receives resource info for a node, and returns those pods as PodDeleteList,
@@ -135,7 +178,7 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*podDeleteList, []error) {
 		return nil, []error{err}
 	}
 
-	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
 		LabelSelector: labelSelector.String(),
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
 	if err != nil {
@@ -155,12 +198,13 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*podDeleteList, []error) {
 				break
 			}
 		}
-		if status.delete {
-			pods = append(pods, podDelete{
-				pod:    pod,
-				status: status,
-			})
-		}
+		// Add the pod to podDeleteList no matter what podDeleteStatus is,
+		// those pods whose podDeleteStatus is false like DaemonSet will
+		// be catched by list.errors()
+		pods = append(pods, podDelete{
+			pod:    pod,
+			status: status,
+		})
 	}
 
 	list := &podDeleteList{items: pods}
@@ -178,17 +222,20 @@ func (d *Helper) DeleteOrEvictPods(pods []corev1.Pod) error {
 		return nil
 	}
 
-	policyGroupVersion, err := CheckEvictionSupport(d.Client)
-	if err != nil {
-		return err
-	}
-
 	// TODO(justinsb): unnecessary?
 	getPodFn := func(namespace, name string) (*corev1.Pod, error) {
-		return d.Client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
+		return d.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	}
-	if len(policyGroupVersion) > 0 {
-		return d.evictPods(pods, policyGroupVersion, getPodFn)
+
+	if !d.DisableEviction {
+		policyGroupVersion, err := CheckEvictionSupport(d.Client)
+		if err != nil {
+			return err
+		}
+
+		if len(policyGroupVersion) > 0 {
+			return d.evictPods(pods, policyGroupVersion, getPodFn)
+		}
 	}
 
 	return d.deletePods(pods, getPodFn)
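
With the new DisableEviction gate, a caller can force plain deletes even when the cluster advertises the eviction subresource. A hypothetical wrapper, assuming a Helper like the one sketched earlier:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/drain"
)

// deleteWithoutEviction skips CheckEvictionSupport entirely, so
// PodDisruptionBudgets are not consulted; pods are removed with plain deletes.
func deleteWithoutEviction(d *drain.Helper, pods []corev1.Pod) error {
	d.DisableEviction = true
	return d.DeleteOrEvictPods(pods)
}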
@@ -203,12 +250,17 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 	} else {
 		globalTimeout = d.Timeout
 	}
-	ctx, cancel := context.WithTimeout(context.TODO(), globalTimeout)
+	ctx, cancel := context.WithTimeout(d.getContext(), globalTimeout)
 	defer cancel()
 	for _, pod := range pods {
 		go func(pod corev1.Pod, returnCh chan error) {
 			for {
-				fmt.Fprintf(d.Out, "evicting pod %q\n", pod.Name)
+				switch d.DryRunStrategy {
+				case cmdutil.DryRunServer:
+					fmt.Fprintf(d.Out, "evicting pod %s/%s (server dry run)\n", pod.Namespace, pod.Name)
+				default:
+					fmt.Fprintf(d.Out, "evicting pod %s/%s\n", pod.Namespace, pod.Name)
+				}
 				select {
 				case <-ctx.Done():
 					// return here or we'll leak a goroutine.
@@ -230,7 +282,23 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 					return
 				}
 			}
-			_, err := waitForDelete(ctx, []corev1.Pod{pod}, 1*time.Second, time.Duration(math.MaxInt64), true, getPodFn, d.OnPodDeletedOrEvicted, globalTimeout)
+			if d.DryRunStrategy == cmdutil.DryRunServer {
+				returnCh <- nil
+				return
+			}
+			params := waitForDeleteParams{
+				ctx:                             ctx,
+				pods:                            []corev1.Pod{pod},
+				interval:                        1 * time.Second,
+				timeout:                         time.Duration(math.MaxInt64),
+				usingEviction:                   true,
+				getPodFn:                        getPodFn,
+				onDoneFn:                        d.OnPodDeletedOrEvicted,
+				globalTimeout:                   globalTimeout,
+				skipWaitForDeleteTimeoutSeconds: d.SkipWaitForDeleteTimeoutSeconds,
+				out:                             d.Out,
+			}
+			_, err := waitForDelete(params)
 			if err == nil {
 				returnCh <- nil
 			} else {
@@ -271,38 +339,64 @@ func (d *Helper) deletePods(pods []corev1.Pod, getPodFn func(namespace, name str
 			return err
 		}
 	}
-	ctx := context.TODO()
-	_, err := waitForDelete(ctx, pods, 1*time.Second, globalTimeout, false, getPodFn, d.OnPodDeletedOrEvicted, globalTimeout)
+	ctx := d.getContext()
+	params := waitForDeleteParams{
+		ctx:                             ctx,
+		pods:                            pods,
+		interval:                        1 * time.Second,
+		timeout:                         globalTimeout,
+		usingEviction:                   false,
+		getPodFn:                        getPodFn,
+		onDoneFn:                        d.OnPodDeletedOrEvicted,
+		globalTimeout:                   globalTimeout,
+		skipWaitForDeleteTimeoutSeconds: d.SkipWaitForDeleteTimeoutSeconds,
+		out:                             d.Out,
+	}
+	_, err := waitForDelete(params)
 	return err
 }
 
-func waitForDelete(ctx context.Context, pods []corev1.Pod, interval, timeout time.Duration, usingEviction bool, getPodFn func(string, string) (*corev1.Pod, error), onDoneFn func(pod *corev1.Pod, usingEviction bool), globalTimeout time.Duration) ([]corev1.Pod, error) {
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+func waitForDelete(params waitForDeleteParams) ([]corev1.Pod, error) {
+	pods := params.pods
+	err := wait.PollImmediate(params.interval, params.timeout, func() (bool, error) {
 		pendingPods := []corev1.Pod{}
 		for i, pod := range pods {
-			p, err := getPodFn(pod.Namespace, pod.Name)
+			p, err := params.getPodFn(pod.Namespace, pod.Name)
 			if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
-				if onDoneFn != nil {
-					onDoneFn(&pod, usingEviction)
+				if params.onDoneFn != nil {
+					params.onDoneFn(&pod, params.usingEviction)
 				}
 				continue
 			} else if err != nil {
 				return false, err
 			} else {
+				if shouldSkipPod(*p, params.skipWaitForDeleteTimeoutSeconds) {
+					fmt.Fprintf(params.out, podSkipMsgTemplate, pod.Name, params.skipWaitForDeleteTimeoutSeconds)
+					continue
+				}
 				pendingPods = append(pendingPods, pods[i])
 			}
 		}
 		pods = pendingPods
 		if len(pendingPods) > 0 {
 			select {
-			case <-ctx.Done():
-				return false, fmt.Errorf("global timeout reached: %v", globalTimeout)
+			case <-params.ctx.Done():
+				return false, fmt.Errorf("global timeout reached: %v", params.globalTimeout)
 			default:
 				return false, nil
 			}
-			return false, nil
 		}
 		return true, nil
 	})
 	return pods, err
 }
+
+// Since Helper does not have a constructor, we can't enforce Helper.Ctx != nil
+// Multiple public methods prevent us from initializing the context in a single
+// place as well.
+func (d *Helper) getContext() context.Context {
+	if d.Ctx != nil {
+		return d.Ctx
+	}
+	return context.Background()
+}
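
The getContext helper is the standard nil-safe fallback for an optional context field; a self-contained illustration of the pattern (the helper type here is hypothetical):

package main

import (
	"context"
	"fmt"
)

type helper struct{ ctx context.Context }

// getContext returns the caller-supplied context, or a safe default when
// the zero-value struct was used and ctx is nil.
func (h *helper) getContext() context.Context {
	if h.ctx != nil {
		return h.ctx
	}
	return context.Background()
}

func main() {
	var h helper
	fmt.Println(h.getContext() != nil) // true: never hands out a nil context
}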
vendor/k8s.io/kubectl/pkg/drain/filters.go (generated, vendored): 23 changed lines
@@ -17,8 +17,10 @@ limitations under the License.
 package drain
 
 import (
+	"context"
 	"fmt"
 	"strings"
+	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -133,8 +135,11 @@ func makePodDeleteStatusWithError(message string) podDeleteStatus {
 	}
 }
 
+// The filters are applied in a specific order, only the last filter's
+// message will be retained if there are any warnings.
 func (d *Helper) makeFilters() []podFilter {
 	return []podFilter{
+		d.skipDeletedFilter,
 		d.daemonSetFilter,
 		d.mirrorPodFilter,
 		d.localStorageFilter,
@@ -168,7 +173,7 @@ func (d *Helper) daemonSetFilter(pod corev1.Pod) podDeleteStatus {
 		return makePodDeleteStatusOkay()
 	}
 
-	if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil {
+	if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(context.TODO(), controllerRef.Name, metav1.GetOptions{}); err != nil {
 		// remove orphaned pods with a warning if --force is used
 		if apierrors.IsNotFound(err) && d.Force {
 			return makePodDeleteStatusWithWarning(true, err.Error())
@@ -203,6 +208,9 @@ func (d *Helper) localStorageFilter(pod corev1.Pod) podDeleteStatus {
 		return makePodDeleteStatusWithError(localStorageFatal)
 	}
 
+	// TODO: this warning gets dropped by subsequent filters;
+	// consider accounting for multiple warning conditions or at least
+	// preserving the last warning message.
 	return makePodDeleteStatusWithWarning(true, localStorageWarning)
 }
 
@@ -221,3 +229,16 @@ func (d *Helper) unreplicatedFilter(pod corev1.Pod) podDeleteStatus {
 	}
 	return makePodDeleteStatusWithError(unmanagedFatal)
 }
+
+func shouldSkipPod(pod corev1.Pod, skipDeletedTimeoutSeconds int) bool {
+	return skipDeletedTimeoutSeconds > 0 &&
+		!pod.ObjectMeta.DeletionTimestamp.IsZero() &&
+		int(time.Now().Sub(pod.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > skipDeletedTimeoutSeconds
+}
+
+func (d *Helper) skipDeletedFilter(pod corev1.Pod) podDeleteStatus {
+	if shouldSkipPod(pod, d.SkipWaitForDeleteTimeoutSeconds) {
+		return makePodDeleteStatusSkip()
+	}
+	return makePodDeleteStatusOkay()
+}
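
A self-contained check of the shouldSkipPod arithmetic (the predicate is copied here so the example runs standalone; time.Since replaces time.Now().Sub for idiomatic Go):

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func shouldSkipPod(pod corev1.Pod, skipDeletedTimeoutSeconds int) bool {
	return skipDeletedTimeoutSeconds > 0 &&
		!pod.ObjectMeta.DeletionTimestamp.IsZero() &&
		int(time.Since(pod.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > skipDeletedTimeoutSeconds
}

func main() {
	deleted := metav1.NewTime(time.Now().Add(-90 * time.Second))
	pod := corev1.Pod{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &deleted}}
	fmt.Println(shouldSkipPod(pod, 60)) // true: terminating for 90s > 60s threshold
	fmt.Println(shouldSkipPod(pod, 0))  // false: 0 disables the skip
}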