Changes for converting node to v1 in drain

ravisantoshgudimetla
2017-06-30 08:58:57 -04:00
parent 8bece8ec41
commit db120eb8ca
2 changed files with 57 additions and 38 deletions
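
In short, RunCordonOrUncordon no longer flips Spec.Unschedulable on the internal object via reflection and then loops on helper.Replace until the write stops conflicting. It now converts the node to its versioned form, JSON-serializes it before and after the change, and sends the difference to the API server as a strategic merge patch. The sketch below shows that flow in isolation. It is not the code from this commit: cordonOrUncordon is a made-up name, and it uses a typed clientset with current client-go import paths and call signatures instead of the vendored resource.Helper seen in the diff.

```go
package drainsketch

import (
	"context"
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes"
)

// cordonOrUncordon (hypothetical name) flips node.Spec.Unschedulable to the
// desired value by PATCHing only the changed field, mirroring the approach
// the diff below takes with resource.Helper.
func cordonOrUncordon(ctx context.Context, client kubernetes.Interface, nodeName string, desired bool) error {
	node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if node.Spec.Unschedulable == desired {
		// Already cordoned/uncordoned; nothing to send.
		return nil
	}

	// Serialize the node before and after the change...
	oldData, err := json.Marshal(node)
	if err != nil {
		return err
	}
	node.Spec.Unschedulable = desired
	newData, err := json.Marshal(node)
	if err != nil {
		return err
	}

	// ...and let strategicpatch compute the minimal merge patch between the two.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Node{})
	if err != nil {
		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
	}

	// Send only the changed field; unlike a full Replace, this does not race
	// on resourceVersion with concurrent writers.
	_, err = client.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}
```

Because the resulting patch carries only spec.unschedulable, a concurrent update to some other node field no longer produces a resourceVersion conflict, which is why the diff also drops the kMaxNodeUpdateRetry constant and the retry loop around helper.Replace.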


@@ -20,8 +20,8 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"k8s.io/apimachinery/pkg/util/json"
 	"math"
-	"reflect"
 	"strings"
 	"time"
@@ -33,8 +33,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -43,7 +46,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/kubectl/resource"
-	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/util/i18n"
 )
@@ -84,7 +86,6 @@ const (
 	kLocalStorageWarning = "Deleting pods with local storage"
 	kUnmanagedFatal = "pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override)"
 	kUnmanagedWarning = "Deleting pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet"
-	kMaxNodeUpdateRetry = 10
 )

 var (
@@ -353,7 +354,7 @@ func (o *DrainOptions) daemonsetFilter(pod api.Pod) (bool, *warning, *fatal) {
 }

 func mirrorPodFilter(pod api.Pod) (bool, *warning, *fatal) {
-	if _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]; found {
+	if _, found := pod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found {
 		return false, nil, nil
 	}
 	return true, nil, nil
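
The mirrorPodFilter hunk above swaps the kubelet-internal constant types.ConfigMirrorAnnotationKey for the versioned corev1.MirrorPodAnnotationKey; both resolve to the annotation "kubernetes.io/config.mirror". A minimal sketch of the same check against a versioned pod follows; isMirrorPod is a made-up helper, and the k8s.io/api/core/v1 import is where the constant lives today rather than the vendored path used at the time.

```go
package drainsketch

import corev1 "k8s.io/api/core/v1"

// isMirrorPod reports whether the kubelet created this pod as a mirror of a
// static manifest. Drain skips such pods: deleting the mirror object through
// the API server does not remove the static pod backing it.
func isMirrorPod(pod *corev1.Pod) bool {
	_, found := pod.Annotations[corev1.MirrorPodAnnotationKey]
	return found
}
```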
@@ -621,27 +622,28 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error {
 	}

 	if o.nodeInfo.Mapping.GroupVersionKind.Kind == "Node" {
-		unsched := reflect.ValueOf(o.nodeInfo.Object).Elem().FieldByName("Spec").FieldByName("Unschedulable")
-		if unsched.Bool() == desired {
+		obj, err := o.nodeInfo.Mapping.ConvertToVersion(o.nodeInfo.Object, o.nodeInfo.Mapping.GroupVersionKind.GroupVersion())
+		if err != nil {
+			return err
+		}
+		oldData, err := json.Marshal(obj)
+		node, ok := obj.(*corev1.Node)
+		if !ok {
+			return fmt.Errorf("unexpected Type%T, expected Node", obj)
+		}
+		unsched := node.Spec.Unschedulable
+		if unsched == desired {
 			cmdutil.PrintSuccess(o.mapper, false, o.Out, o.nodeInfo.Mapping.Resource, o.nodeInfo.Name, false, already(desired))
 		} else {
 			helper := resource.NewHelper(o.restClient, o.nodeInfo.Mapping)
-			unsched.SetBool(desired)
+			node.Spec.Unschedulable = desired
-			var err error
-			for i := 0; i < kMaxNodeUpdateRetry; i++ {
-				// We don't care about what previous versions may exist, we always want
-				// to overwrite, and Replace always sets current ResourceVersion if version is "".
-				helper.Versioner.SetResourceVersion(o.nodeInfo.Object, "")
-				_, err = helper.Replace(cmdNamespace, o.nodeInfo.Name, true, o.nodeInfo.Object)
-				if err != nil {
-					if !apierrors.IsConflict(err) {
-						return err
-					}
-				} else {
-					break
-				}
-				// It's a race, no need to sleep
-			}
+			newData, err := json.Marshal(obj)
+			patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
+			if err != nil {
+				return err
+			}
+			_, err = helper.Patch(cmdNamespace, o.nodeInfo.Name, types.StrategicMergePatchType, patchBytes)
 			if err != nil {
 				return err
 			}