@@ -23,13 +23,13 @@ import (
 	"sync"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
+	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"
@@ -95,17 +95,17 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
 
 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 	}
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
-		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
+		eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemonset-controller"}),
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
+			Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemon-set"}),
 		},
 		burstReplicas: BurstReplicas,
 		expectations: controller.NewControllerExpectations(),
@@ -239,7 +239,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
 	dsc.queue.Add(key)
 }
 
-func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.DaemonSet {
+func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
 	// look up in the cache, if cached and the cache is valid, just return cached value
 	if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
 		ds, ok := obj.(*extensions.DaemonSet)
@@ -272,7 +272,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.Daemo
 }
 
 // isCacheValid check if the cache is valid
-func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions.DaemonSet) bool {
+func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
 	_, exists, err := dsc.dsStore.Get(cachedDS)
 	// ds has been deleted or updated, cache is invalid
 	if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
@@ -283,7 +283,7 @@ func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions
 
 // isDaemonSetMatch take a Pod and DaemonSet, return whether the Pod and DaemonSet are matching
 // TODO(mqliang): This logic is a copy from GetPodDaemonSets(), remove the duplication
-func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool {
+func isDaemonSetMatch(pod *v1.Pod, ds *extensions.DaemonSet) bool {
 	if ds.Namespace != pod.Namespace {
 		return false
 	}
@@ -301,7 +301,7 @@ func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool {
 }
 
 func (dsc *DaemonSetsController) addPod(obj interface{}) {
-	pod := obj.(*api.Pod)
+	pod := obj.(*v1.Pod)
 	glog.V(4).Infof("Pod %s added.", pod.Name)
 	if ds := dsc.getPodDaemonSet(pod); ds != nil {
 		dsKey, err := controller.KeyFunc(ds)
@@ -316,10 +316,10 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 
 // When a pod is updated, figure out what sets manage it and wake them
 // up. If the labels of the pod have changed we need to awaken both the old
-// and new set. old and cur must be *api.Pod types.
+// and new set. old and cur must be *v1.Pod types.
 func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
-	curPod := cur.(*api.Pod)
-	oldPod := old.(*api.Pod)
+	curPod := cur.(*v1.Pod)
+	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
 		// Periodic resync will send update events for all known pods.
 		// Two different versions of the same pod will always have different RVs.
@@ -342,7 +342,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
 }
 
 func (dsc *DaemonSetsController) deletePod(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
+	pod, ok := obj.(*v1.Pod)
 	// When a delete is dropped, the relist will notice a pod in the store not
 	// in the list, leading to the insertion of a tombstone object which contains
 	// the deleted key/value. Note that this value might be stale. If the pod
@@ -354,7 +354,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
 			glog.Errorf("Couldn't get object from tombstone %#v", obj)
 			return
 		}
-		pod, ok = tombstone.Obj.(*api.Pod)
+		pod, ok = tombstone.Obj.(*v1.Pod)
 		if !ok {
 			glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
 			return
@@ -379,7 +379,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 		glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
 		return
 	}
-	node := obj.(*api.Node)
+	node := obj.(*v1.Node)
 	for i := range dsList.Items {
 		ds := &dsList.Items[i]
 		shouldEnqueue := dsc.nodeShouldRunDaemonPod(node, ds)
@@ -390,8 +390,8 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 }
 
 func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
-	oldNode := old.(*api.Node)
-	curNode := cur.(*api.Node)
+	oldNode := old.(*v1.Node)
+	curNode := cur.(*v1.Node)
 	if reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
 		// If node labels didn't change, we can ignore this update.
 		return
@@ -412,8 +412,8 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 }
 
 // getNodesToDaemonSetPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*api.Pod, error) {
-	nodeToDaemonPods := make(map[string][]*api.Pod)
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
+	nodeToDaemonPods := make(map[string][]*v1.Pod)
 	selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
 	if err != nil {
 		return nil, err
@@ -585,7 +585,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 			// Sort the daemon pods by creation time, so the the oldest is first.
 			daemonPods, _ := nodeToDaemonPods[node.Name]
 			sort.Sort(podByCreationTimestamp(daemonPods))
-			if api.IsPodReady(daemonPods[0]) {
+			if v1.IsPodReady(daemonPods[0]) {
 				numberReady++
 			}
 		}
@@ -623,7 +623,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 
 	everything := unversioned.LabelSelector{}
 	if reflect.DeepEqual(ds.Spec.Selector, &everything) {
-		dsc.eventRecorder.Eventf(ds, api.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
+		dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
 		return nil
 	}
 
@@ -644,7 +644,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	return dsc.updateDaemonSetStatus(ds)
 }
 
-func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
+func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) bool {
 	// If the daemon set specifies a node name, check that it matches with node.Name.
 	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
 		return false
@@ -652,23 +652,23 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
 
 	// TODO: Move it to the predicates
 	for _, c := range node.Status.Conditions {
-		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
+		if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
 			return false
 		}
 	}
 
-	newPod := &api.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
+	newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
 	newPod.Namespace = ds.Namespace
 	newPod.Spec.NodeName = node.Name
 
-	pods := []*api.Pod{}
+	pods := []*v1.Pod{}
 
 	for _, m := range dsc.podStore.Indexer.List() {
-		pod := m.(*api.Pod)
+		pod := m.(*v1.Pod)
 		if pod.Spec.NodeName != node.Name {
 			continue
 		}
-		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
+		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
 			continue
 		}
 		// ignore pods that belong to the daemonset when taking into account whether
@@ -689,10 +689,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
 		glog.V(4).Infof("GeneralPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
 		switch reason := r.(type) {
 		case *predicates.InsufficientResourceError:
-			dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
+			dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
 		case *predicates.PredicateFailureError:
 			if reason == predicates.ErrPodNotFitsHostPorts {
-				dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
+				dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
 			}
 		}
 	}
@@ -712,7 +712,7 @@ func (o byCreationTimestamp) Less(i, j int) bool {
 	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
 }
 
-type podByCreationTimestamp []*api.Pod
+type podByCreationTimestamp []*v1.Pod
 
 func (o podByCreationTimestamp) Len() int { return len(o) }
 func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
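The hunks above convert the DaemonSet controller from the internal api types and internal clientset to the versioned v1 / extensions/v1beta1 types and the generated release_1_5 clientset; the hunks below apply the same conversion to the controller's tests. As a rough consolidated sketch of the event wiring the controller now uses, assembled only from calls visible in the hunks above (the package clause, the helper name newVersionedRecorder, and the clientset.Interface parameter type are illustrative assumptions, not part of the commit):

package daemon

import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// newVersionedRecorder is a hypothetical helper mirroring NewDaemonSetsController above:
// events are sunk through the versioned core client, and the recorder is stamped with a
// v1.EventSource instead of the internal api.EventSource.
func newVersionedRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemonset-controller"})
}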
@@ -20,14 +20,14 @@ import (
 	"fmt"
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"
@@ -55,46 +55,46 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
 func newDaemonSet(name string) *extensions.DaemonSet {
 	return &extensions.DaemonSet{
 		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: name,
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 		},
 		Spec: extensions.DaemonSetSpec{
 			Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel},
-			Template: api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{
 					Labels: simpleDaemonSetLabel,
 				},
-				Spec: api.PodSpec{
-					Containers: []api.Container{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
 						{
 							Image: "foo/bar",
-							TerminationMessagePath: api.TerminationMessagePathDefault,
-							ImagePullPolicy: api.PullIfNotPresent,
+							TerminationMessagePath: v1.TerminationMessagePathDefault,
+							ImagePullPolicy: v1.PullIfNotPresent,
							SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
 						},
 					},
-					DNSPolicy: api.DNSDefault,
+					DNSPolicy: v1.DNSDefault,
 				},
 			},
 		},
 	}
 }
 
-func newNode(name string, label map[string]string) *api.Node {
-	return &api.Node{
-		TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-		ObjectMeta: api.ObjectMeta{
+func newNode(name string, label map[string]string) *v1.Node {
+	return &v1.Node{
+		TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+		ObjectMeta: v1.ObjectMeta{
 			Name: name,
 			Labels: label,
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 		},
-		Status: api.NodeStatus{
-			Conditions: []api.NodeCondition{
-				{Type: api.NodeReady, Status: api.ConditionTrue},
+		Status: v1.NodeStatus{
+			Conditions: []v1.NodeCondition{
+				{Type: v1.NodeReady, Status: v1.ConditionTrue},
 			},
-			Allocatable: api.ResourceList{
-				api.ResourcePods: resource.MustParse("100"),
+			Allocatable: v1.ResourceList{
+				v1.ResourcePods: resource.MustParse("100"),
 			},
 		},
 	}
@@ -106,28 +106,28 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
 	}
 }
 
-func newPod(podName string, nodeName string, label map[string]string) *api.Pod {
-	pod := &api.Pod{
-		TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-		ObjectMeta: api.ObjectMeta{
+func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
+	pod := &v1.Pod{
+		TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+		ObjectMeta: v1.ObjectMeta{
 			GenerateName: podName,
 			Labels: label,
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 		},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
 			NodeName: nodeName,
-			Containers: []api.Container{
+			Containers: []v1.Container{
 				{
 					Image: "foo/bar",
-					TerminationMessagePath: api.TerminationMessagePathDefault,
-					ImagePullPolicy: api.PullIfNotPresent,
+					TerminationMessagePath: v1.TerminationMessagePathDefault,
+					ImagePullPolicy: v1.PullIfNotPresent,
 					SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
 				},
 			},
-			DNSPolicy: api.DNSDefault,
+			DNSPolicy: v1.DNSDefault,
 		},
 	}
-	api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta)
+	v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
 	return pod
 }
 
@@ -138,8 +138,8 @@ func addPods(podStore cache.Store, nodeName string, label map[string]string, num
 }
 
 func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
-	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
+	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
+	informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
 
 	manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
 	informerFactory.Start(wait.NeverStop)
@@ -212,8 +212,8 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager, podControl := newTestController()
 	node := newNode("not-ready", nil)
-	node.Status.Conditions = []api.NodeCondition{
-		{Type: api.NodeReady, Status: api.ConditionFalse},
+	node.Status.Conditions = []v1.NodeCondition{
+		{Type: v1.NodeReady, Status: v1.ConditionFalse},
 	}
 	manager.nodeStore.Add(node)
 	ds := newDaemonSet("foo")
@@ -225,29 +225,29 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
 func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager, podControl := newTestController()
 	node := newNode("not-enough-disk", nil)
-	node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}
+	node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
 	manager.nodeStore.Add(node)
 	ds := newDaemonSet("foo")
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 }
 
-func resourcePodSpec(nodeName, memory, cpu string) api.PodSpec {
-	return api.PodSpec{
+func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
+	return v1.PodSpec{
 		NodeName: nodeName,
-		Containers: []api.Container{{
-			Resources: api.ResourceRequirements{
+		Containers: []v1.Container{{
+			Resources: v1.ResourceRequirements{
 				Requests: allocatableResources(memory, cpu),
 			},
 		}},
 	}
 }
 
-func allocatableResources(memory, cpu string) api.ResourceList {
-	return api.ResourceList{
-		api.ResourceMemory: resource.MustParse(memory),
-		api.ResourceCPU: resource.MustParse(cpu),
-		api.ResourcePods: resource.MustParse("100"),
+func allocatableResources(memory, cpu string) v1.ResourceList {
+	return v1.ResourceList{
+		v1.ResourceMemory: resource.MustParse(memory),
+		v1.ResourceCPU: resource.MustParse(cpu),
+		v1.ResourcePods: resource.MustParse("100"),
 	}
 }
 
@@ -258,7 +258,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	node := newNode("too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("100M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -273,9 +273,9 @@ func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 	node := newNode("too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("100M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec,
-		Status: api.PodStatus{Phase: api.PodSucceeded},
+		Status: v1.PodStatus{Phase: v1.PodSucceeded},
 	})
 	ds := newDaemonSet("foo")
 	ds.Spec.Template.Spec = podSpec
@@ -290,7 +290,7 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
 	node := newNode("not-too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("200M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -306,7 +306,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 	node := newNode("not-too-much-mem", nil)
 	node.Status.Allocatable = allocatableResources("200M", "200m")
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 	ds := newDaemonSet("foo")
@@ -319,10 +319,10 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 
 // DaemonSets should not place onto nodes that would cause port conflicts
 func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
-	podSpec := api.PodSpec{
+	podSpec := v1.PodSpec{
 		NodeName: "port-conflict",
-		Containers: []api.Container{{
-			Ports: []api.ContainerPort{{
+		Containers: []v1.Container{{
+			Ports: []v1.ContainerPort{{
 				HostPort: 666,
 			}},
 		}},
@@ -330,7 +330,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager, podControl := newTestController()
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec,
 	})
 
@@ -345,10 +345,10 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 //
 // Issue: https://github.com/kubernetes/kubernetes/issues/22309
 func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
-	podSpec := api.PodSpec{
+	podSpec := v1.PodSpec{
 		NodeName: "port-conflict",
-		Containers: []api.Container{{
-			Ports: []api.ContainerPort{{
+		Containers: []v1.Container{{
+			Ports: []v1.ContainerPort{{
 				HostPort: 666,
 			}},
 		}},
@@ -356,10 +356,10 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 	manager, podControl := newTestController()
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	manager.podStore.Indexer.Add(&v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			Labels: simpleDaemonSetLabel,
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 		},
 		Spec: podSpec,
 	})
@@ -371,18 +371,18 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 
 // DaemonSets should place onto nodes that would not cause port conflicts
 func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
-	podSpec1 := api.PodSpec{
+	podSpec1 := v1.PodSpec{
 		NodeName: "no-port-conflict",
-		Containers: []api.Container{{
-			Ports: []api.ContainerPort{{
+		Containers: []v1.Container{{
+			Ports: []v1.ContainerPort{{
 				HostPort: 6661,
 			}},
 		}},
 	}
-	podSpec2 := api.PodSpec{
+	podSpec2 := v1.PodSpec{
 		NodeName: "no-port-conflict",
-		Containers: []api.Container{{
-			Ports: []api.ContainerPort{{
+		Containers: []v1.Container{{
+			Ports: []v1.ContainerPort{{
 				HostPort: 6662,
 			}},
 		}},
@@ -390,7 +390,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 	manager, podControl := newTestController()
 	node := newNode("no-port-conflict", nil)
 	manager.nodeStore.Add(node)
-	manager.podStore.Indexer.Add(&api.Pod{
+	manager.podStore.Indexer.Add(&v1.Pod{
 		Spec: podSpec1,
 	})
 	ds := newDaemonSet("foo")
@@ -406,12 +406,12 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	manager, podControl := newTestController()
 	manager.nodeStore.Store.Add(newNode("node1", nil))
 	// Create pod not controlled by a daemonset.
-	manager.podStore.Indexer.Add(&api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	manager.podStore.Indexer.Add(&v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			Labels: map[string]string{"bang": "boom"},
-			Namespace: api.NamespaceDefault,
+			Namespace: v1.NamespaceDefault,
 		},
-		Spec: api.PodSpec{
+		Spec: v1.PodSpec{
 			NodeName: "node1",
 		},
 	})
@@ -554,7 +554,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 	addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
 	daemon := newDaemonSet("foo")
 	affinity := map[string]string{
-		api.AffinityAnnotationKey: fmt.Sprintf(`
+		v1.AffinityAnnotationKey: fmt.Sprintf(`
 		{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
 			"nodeSelectorTerms": [{
 				"matchExpressions": [{
@@ -586,7 +586,7 @@ func TestNumberReadyStatus(t *testing.T) {
 	selector, _ := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector)
 	daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
 	for _, pod := range daemonPods {
-		condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
+		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
 		pod.Status.Conditions = append(pod.Status.Conditions, condition)
 	}
 
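The test hunks follow the same substitution. Pulling the converted newTestController hunk together into one readable sketch (taken verbatim from the diff above, except that the shadowing clientset variable is renamed to client here for clarity; the extra second argument to informers.NewSharedInformerFactory is the new parameter this commit passes as nil):

	// Build a versioned clientset against an empty host, as the test harness does.
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	informerFactory := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
	manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), client, 0)
	informerFactory.Start(wait.NeverStop)

Note that the GroupVersion now comes from registered.GroupOrDie(v1.GroupName) rather than api.GroupName, matching the switch to the versioned release_1_5 clientset.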