diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index 6a7eb8d6240..2a3c6e36090 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -521,6 +521,8 @@ pod-running
 pod-running-timeout
 pods-per-core
 policy-config-file
+policy-configmap
+policy-configmap-namespace
 poll-interval
 portal-net
 prepull-images
@@ -683,6 +685,7 @@ upgrade-image
 upgrade-target
 use-kubernetes-cluster-service
 use-kubernetes-version
+use-legacy-policy-config
 use-service-account-credentials
 user-whitelist
 use-service-account-credentials
diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go
index 78abfe24611..7b0f935cd79 100644
--- a/pkg/apis/componentconfig/types.go
+++ b/pkg/apis/componentconfig/types.go
@@ -599,6 +599,19 @@ type KubeSchedulerConfiguration struct {
 	LockObjectNamespace string
 	// LockObjectName defines the lock object name
 	LockObjectName string
+	// PolicyConfigMapName is the name of the ConfigMap object that specifies
+	// the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler
+	// uses PolicyConfigFile. If UseLegacyPolicyConfig is false and
+	// PolicyConfigMapName is not empty, the ConfigMap object with this name must
+	// exist in PolicyConfigMapNamespace before scheduler initialization.
+	PolicyConfigMapName string
+	// PolicyConfigMapNamespace is the namespace where the above policy config map
+	// is located. If none is provided, the default system namespace ("kube-system")
+	// will be used.
+	PolicyConfigMapNamespace string
+	// UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and
+	// to use PolicyConfigFile if available.
+	UseLegacyPolicyConfig bool
 }
 
 // LeaderElectionConfiguration defines the configuration of leader election
diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go
index bb99f8f3cd9..b5c7c3b3433 100644
--- a/pkg/apis/componentconfig/v1alpha1/defaults.go
+++ b/pkg/apis/componentconfig/v1alpha1/defaults.go
@@ -164,6 +164,9 @@ func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
 	if obj.LockObjectName == "" {
 		obj.LockObjectName = SchedulerDefaultLockObjectName
 	}
+	if obj.PolicyConfigMapNamespace == "" {
+		obj.PolicyConfigMapNamespace = api.NamespaceSystem
+	}
 }
 
 func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
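To make the defaulting above concrete, here is a minimal editor-added sketch (not part of the patch) of how an empty PolicyConfigMapNamespace is expected to be filled in; it assumes the componentconfig/v1alpha1 package shown in these hunks is importable as written.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
)

func main() {
	// PolicyConfigMapNamespace is deliberately left empty.
	cfg := &v1alpha1.KubeSchedulerConfiguration{PolicyConfigMapName: "my-scheduler-policy"}
	// SetDefaults_KubeSchedulerConfiguration is the function changed above.
	v1alpha1.SetDefaults_KubeSchedulerConfiguration(cfg)
	fmt.Println(cfg.PolicyConfigMapNamespace) // expected: "kube-system"
}
```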
diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go
index 7ef07c29fb8..48b487c4bad 100644
--- a/pkg/apis/componentconfig/v1alpha1/types.go
+++ b/pkg/apis/componentconfig/v1alpha1/types.go
@@ -134,6 +134,19 @@ type KubeSchedulerConfiguration struct {
 	LockObjectNamespace string `json:"lockObjectNamespace"`
 	// LockObjectName defines the lock object name
 	LockObjectName string `json:"lockObjectName"`
+	// PolicyConfigMapName is the name of the ConfigMap object that specifies
+	// the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler
+	// uses PolicyConfigFile. If UseLegacyPolicyConfig is false and
+	// PolicyConfigMapName is not empty, the ConfigMap object with this name must
+	// exist in PolicyConfigMapNamespace before scheduler initialization.
+	PolicyConfigMapName string `json:"policyConfigMapName"`
+	// PolicyConfigMapNamespace is the namespace where the above policy config map
+	// is located. If none is provided, the default system namespace ("kube-system")
+	// will be used.
+	PolicyConfigMapNamespace string `json:"policyConfigMapNamespace"`
+	// UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and
+	// to use PolicyConfigFile if available.
+	UseLegacyPolicyConfig bool `json:"useLegacyPolicyConfig"`
 }
 
 // HairpinMode denotes how the kubelet should configure networking to handle
diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
index 83ae17ffe9b..5dfdc9e7b3e 100644
--- a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
+++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go
@@ -139,6 +139,9 @@ func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSche
 	}
 	out.LockObjectNamespace = in.LockObjectNamespace
 	out.LockObjectName = in.LockObjectName
+	out.PolicyConfigMapName = in.PolicyConfigMapName
+	out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace
+	out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig
 	return nil
 }
 
@@ -166,6 +169,9 @@ func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSche
 	}
 	out.LockObjectNamespace = in.LockObjectNamespace
 	out.LockObjectName = in.LockObjectName
+	out.PolicyConfigMapName = in.PolicyConfigMapName
+	out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace
+	out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig
 	return nil
 }
diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go
index 97740579ed1..f6422ea07c7 100644
--- a/pkg/generated/openapi/zz_generated.openapi.go
+++ b/pkg/generated/openapi/zz_generated.openapi.go
@@ -13050,8 +13050,29 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope
 						Format: "",
 					},
 				},
+				"policyConfigMapName": {
+					SchemaProps: spec.SchemaProps{
+						Description: "PolicyConfigMapName is the name of the ConfigMap object that specifies the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler uses PolicyConfigFile. If UseLegacyPolicyConfig is false and PolicyConfigMapName is not empty, the ConfigMap object with this name must exist in PolicyConfigMapNamespace before scheduler initialization.",
+						Type:        []string{"string"},
+						Format:      "",
+					},
+				},
+				"policyConfigMapNamespace": {
+					SchemaProps: spec.SchemaProps{
+						Description: "PolicyConfigMapNamespace is the namespace where the above policy config map is located. If none is provided, the default system namespace (\"kube-system\") will be used.",
+						Type:        []string{"string"},
+						Format:      "",
+					},
+				},
+				"useLegacyPolicyConfig": {
+					SchemaProps: spec.SchemaProps{
+						Description: "UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and to use PolicyConfigFile if available.",
+						Type:        []string{"boolean"},
+						Format:      "",
+					},
+				},
 			},
-			Required: []string{"port", "address", "algorithmProvider", "policyConfigFile", "enableProfiling", "enableContentionProfiling", "contentType", "kubeAPIQPS", "kubeAPIBurst", "schedulerName", "hardPodAffinitySymmetricWeight", "failureDomains", "leaderElection", "lockObjectNamespace", "lockObjectName"},
+			Required: []string{"port", "address", "algorithmProvider", "policyConfigFile", "enableProfiling", "enableContentionProfiling", "contentType", "kubeAPIQPS", "kubeAPIBurst", "schedulerName", "hardPodAffinitySymmetricWeight", "failureDomains", "leaderElection", "lockObjectNamespace", "lockObjectName", "policyConfigMapName", "policyConfigMapNamespace", "useLegacyPolicyConfig"},
 		},
 	},
 	Dependencies: []string{
diff --git a/plugin/cmd/kube-scheduler/app/configurator.go b/plugin/cmd/kube-scheduler/app/configurator.go
index d2900f93bc1..1d0d9e08eb7 100644
--- a/plugin/cmd/kube-scheduler/app/configurator.go
+++ b/plugin/cmd/kube-scheduler/app/configurator.go
@@ -26,6 +26,7 @@ import (
 	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1"
 	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -71,8 +72,8 @@ func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
 	return cli, nil
 }
 
-// createScheduler encapsulates the entire creation of a runnable scheduler.
-func createScheduler(
+// CreateScheduler encapsulates the entire creation of a runnable scheduler.
+func CreateScheduler(
 	s *options.SchedulerServer,
 	kubecli *clientset.Clientset,
 	nodeInformer coreinformers.NodeInformer,
@@ -101,38 +102,91 @@ func createScheduler(
 	configurator = &schedulerConfigurator{
 		configurator,
 		s.PolicyConfigFile,
-		s.AlgorithmProvider}
+		s.AlgorithmProvider,
+		s.PolicyConfigMapName,
+		s.PolicyConfigMapNamespace,
+		s.UseLegacyPolicyConfig,
+	}
 
 	return scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
 		cfg.Recorder = recorder
 	})
 }
 
-// schedulerConfigurator is an interface wrapper that provides default Configuration creation based on user
-// provided config file.
+// schedulerConfigurator is an interface wrapper that provides a way to create
+// a scheduler from a user provided config file or ConfigMap object.
 type schedulerConfigurator struct {
 	scheduler.Configurator
-	policyFile        string
-	algorithmProvider string
+	policyFile               string
+	algorithmProvider        string
+	policyConfigMap          string
+	policyConfigMapNamespace string
+	useLegacyPolicyConfig    bool
 }
 
-// Create implements the interface for the Configurator, hence it is exported even through the struct is not.
+// getSchedulerPolicyConfig finds and decodes the scheduler's policy config.
+// If no such policy is found, it returns nil, nil.
+func (sc schedulerConfigurator) getSchedulerPolicyConfig() (*schedulerapi.Policy, error) {
+	var configData []byte
+	var policyConfigMapFound bool
+	var policy schedulerapi.Policy
+
+	// If not in legacy mode, try to find policy ConfigMap.
+	if !sc.useLegacyPolicyConfig && len(sc.policyConfigMap) != 0 {
+		namespace := sc.policyConfigMapNamespace
+		policyConfigMap, err := sc.GetClient().CoreV1().ConfigMaps(namespace).Get(sc.policyConfigMap, metav1.GetOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("Error getting scheduler policy ConfigMap: %v.", err)
+		}
+		if policyConfigMap != nil {
+			// We expect the first element in the Data member of the ConfigMap to
+			// contain the policy config.
+			if len(policyConfigMap.Data) != 1 {
+				return nil, fmt.Errorf("ConfigMap %v has %v entries in its 'Data'. It must have only one.", sc.policyConfigMap, len(policyConfigMap.Data))
+			}
+			policyConfigMapFound = true
+			// This loop should iterate only once, as we have already checked the length of Data.
+			for _, val := range policyConfigMap.Data {
+				glog.V(5).Infof("Scheduler policy ConfigMap: %v", val)
+				configData = []byte(val)
+			}
+		}
+	}
+
+	// If we are in legacy mode or ConfigMap name is empty, try to use policy
+	// config file.
+	if !policyConfigMapFound {
+		if _, err := os.Stat(sc.policyFile); err != nil {
+			// No config file is found.
+			return nil, nil
+		}
+		var err error
+		configData, err = ioutil.ReadFile(sc.policyFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to read policy config: %v", err)
+		}
+	}
+
+	if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
+		return nil, fmt.Errorf("invalid configuration: %v", err)
+	}
+	return &policy, nil
+}
+
+// Create implements the interface for the Configurator, hence it is exported
+// even though the struct is not.
 func (sc schedulerConfigurator) Create() (*scheduler.Config, error) {
-	if _, err := os.Stat(sc.policyFile); err != nil {
+	policy, err := sc.getSchedulerPolicyConfig()
+	if err != nil {
+		return nil, err
+	}
+	// If no policy is found, create scheduler from algorithm provider.
+	if policy == nil {
 		if sc.Configurator != nil {
 			return sc.Configurator.CreateFromProvider(sc.algorithmProvider)
 		}
 		return nil, fmt.Errorf("Configurator was nil")
 	}
-	// policy file is valid, try to create a configuration from it.
-	var policy schedulerapi.Policy
-	configData, err := ioutil.ReadFile(sc.policyFile)
-	if err != nil {
-		return nil, fmt.Errorf("unable to read policy config: %v", err)
-	}
-	if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
-		return nil, fmt.Errorf("invalid configuration: %v", err)
-	}
-	return sc.CreateFromConfig(policy)
+	return sc.CreateFromConfig(*policy)
 }
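The configurator above establishes a precedence among the three possible policy sources. As a rough, editor-added sketch of that decision (resolvePolicySource and its return labels are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"os"
)

// resolvePolicySource mirrors the order getSchedulerPolicyConfig and Create
// apply: the policy ConfigMap first (unless legacy mode is on), then the
// policy file, then the algorithm provider as a fallback.
func resolvePolicySource(useLegacy bool, configMapName, policyFile string) string {
	if !useLegacy && configMapName != "" {
		// In the real code a missing ConfigMap is a hard error, not a fallthrough.
		return "configmap:" + configMapName
	}
	if _, err := os.Stat(policyFile); err == nil {
		return "file:" + policyFile
	}
	return "algorithm-provider"
}

func main() {
	fmt.Println(resolvePolicySource(false, "scheduler-policy", "/etc/scheduler/policy.json"))
	fmt.Println(resolvePolicySource(true, "scheduler-policy", "/etc/scheduler/policy.json"))
}
```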
diff --git a/plugin/cmd/kube-scheduler/app/options/options.go b/plugin/cmd/kube-scheduler/app/options/options.go
index eb13671f49c..535c2696df0 100644
--- a/plugin/cmd/kube-scheduler/app/options/options.go
+++ b/plugin/cmd/kube-scheduler/app/options/options.go
@@ -63,7 +63,10 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Int32Var(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on")
 	fs.StringVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
 	fs.StringVar(&s.AlgorithmProvider, "algorithm-provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders())
-	fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration")
+	fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true")
+	fs.StringVar(&s.PolicyConfigMapName, "policy-configmap", s.PolicyConfigMapName, "Name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config==false")
+	fs.StringVar(&s.PolicyConfigMapNamespace, "policy-configmap-namespace", s.PolicyConfigMapNamespace, "The namespace where policy ConfigMap is located. The system namespace will be used if this is not provided or is empty.")
+	fs.BoolVar(&s.UseLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and use policy config file")
 	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
 	fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled")
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
@@ -80,6 +83,5 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
 	fs.MarkDeprecated("failure-domains", "Doesn't have any effect. Will be removed in future version.")
 	leaderelection.BindFlags(&s.LeaderElection, fs)
-
 	utilfeature.DefaultFeatureGate.AddFlag(fs)
 }
diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go
index fd70e7cf22e..c164a6283d5 100644
--- a/plugin/cmd/kube-scheduler/app/server.go
+++ b/plugin/cmd/kube-scheduler/app/server.go
@@ -73,7 +73,7 @@ func Run(s *options.SchedulerServer) error {
 
 	informerFactory := informers.NewSharedInformerFactory(kubecli, 0)
 
-	sched, err := createScheduler(
+	sched, err := CreateScheduler(
 		s,
 		kubecli,
 		informerFactory.Core().V1().Nodes(),
diff --git a/plugin/pkg/scheduler/algorithm/scheduler_interface.go b/plugin/pkg/scheduler/algorithm/scheduler_interface.go
index 09ec312caf8..ecdf935f85c 100644
--- a/plugin/pkg/scheduler/algorithm/scheduler_interface.go
+++ b/plugin/pkg/scheduler/algorithm/scheduler_interface.go
@@ -41,4 +41,10 @@ type SchedulerExtender interface {
 // onto machines.
 type ScheduleAlgorithm interface {
 	Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error)
+	// Predicates returns a map of predicate functions. This is
+	// exposed for testing.
+	Predicates() map[string]FitPredicate
+	// Prioritizers returns a slice of priority config. This is exposed for
+	// testing.
+	Prioritizers() []PriorityConfig
 }
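The two accessors added to ScheduleAlgorithm exist so tests can inspect a fully constructed scheduler. A hedged, editor-added fragment (assertPolicyApplied is hypothetical; it assumes a sched built via app.CreateScheduler, as in the integration test at the end of this patch, and "PredicateTwo" matches the sample predicate registered there):

```go
package scheduler_test

import (
	"testing"

	"k8s.io/kubernetes/plugin/pkg/scheduler"
)

// assertPolicyApplied shows how the new accessors expose a configured
// scheduler's predicates and prioritizers to a test.
func assertPolicyApplied(t *testing.T, sched *scheduler.Scheduler) {
	preds := sched.Config().Algorithm.Predicates()
	prios := sched.Config().Algorithm.Prioritizers()
	if preds["PredicateTwo"] == nil {
		t.Errorf("expected predicate %q to be registered", "PredicateTwo")
	}
	if len(prios) == 0 {
		t.Error("expected at least one prioritizer")
	}
}
```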
diff --git a/plugin/pkg/scheduler/core/generic_scheduler.go b/plugin/pkg/scheduler/core/generic_scheduler.go
index 928b7ea7c75..a63a376fdb7 100644
--- a/plugin/pkg/scheduler/core/generic_scheduler.go
+++ b/plugin/pkg/scheduler/core/generic_scheduler.go
@@ -127,6 +127,18 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
 	return g.selectHost(priorityList)
 }
 
+// Prioritizers returns a slice containing all the scheduler's priority
+// functions and their config. It is exposed for testing only.
+func (g *genericScheduler) Prioritizers() []algorithm.PriorityConfig {
+	return g.prioritizers
+}
+
+// Predicates returns a map containing all the scheduler's predicate
+// functions. It is exposed for testing only.
+func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate {
+	return g.predicates
+}
+
 // selectHost takes a prioritized list of nodes and then picks one
 // in a round-robin manner from the nodes that had the highest score.
 func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {
diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go
index d53f35baf29..c3cc7bf40fb 100644
--- a/plugin/pkg/scheduler/scheduler.go
+++ b/plugin/pkg/scheduler/scheduler.go
@@ -155,6 +155,11 @@ func (sched *Scheduler) Run() {
 	go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything)
 }
 
+// Config returns the scheduler's config pointer. It is exposed for testing purposes.
+func (sched *Scheduler) Config() *Config {
+	return sched.config
+}
+
 func (sched *Scheduler) scheduleOne() {
 	pod := sched.config.NextPod()
 	if pod.DeletionTimestamp != nil {
diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go
index f438bd11eef..812fcb7c060 100644
--- a/plugin/pkg/scheduler/scheduler_test.go
+++ b/plugin/pkg/scheduler/scheduler_test.go
@@ -97,6 +97,13 @@ func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string,
 	return es.machine, es.err
 }
 
+func (es mockScheduler) Predicates() map[string]algorithm.FitPredicate {
+	return nil
+}
+func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig {
+	return nil
+}
+
 func TestScheduler(t *testing.T) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(t.Logf).Stop()
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 7a98b3cd181..1fb14935f1b 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -39,9 +39,14 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
 	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
+	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
+	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
+	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
+	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -53,6 +58,181 @@ type nodeStateManager struct {
 	makeUnSchedulable nodeMutationFunc
 }
 
+func PredicateOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	return true, nil, nil
+}
+
+func PredicateTwo(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	return true, nil, nil
+}
+
+func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+	return []schedulerapi.HostPriority{}, nil
+}
+
+func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+	return []schedulerapi.HostPriority{}, nil
+}
+
+// TestSchedulerCreationFromConfigMap verifies that a scheduler can be created
+// from configuration provided by a ConfigMap object, and that the
+// configuration is applied correctly.
+func TestSchedulerCreationFromConfigMap(t *testing.T) {
+	_, s := framework.RunAMaster(nil)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("configmap", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+
+	// Pre-register some predicate and priority functions
+	factory.RegisterFitPredicate("PredicateOne", PredicateOne)
+	factory.RegisterFitPredicate("PredicateTwo", PredicateTwo)
+	factory.RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
+	factory.RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)
+
+	// Add a ConfigMap object.
+	configPolicyName := "scheduler-custom-policy-config"
+	policyConfigMap := v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
+		Data: map[string]string{
+			"scheduler-policy-config.json": `{
+				"kind" : "Policy",
+				"apiVersion" : "v1",
+				"predicates" : [
+					{"name" : "PredicateOne"},
+					{"name" : "PredicateTwo"}
+				],
+				"priorities" : [
+					{"name" : "PriorityOne", "weight" : 1},
+					{"name" : "PriorityTwo", "weight" : 5}
+				]
+			}`,
+		},
+	}
+
+	policyConfigMap.APIVersion = api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()
+	clientSet.Core().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
+
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+	ss := options.NewSchedulerServer()
+	ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
+	ss.PolicyConfigMapName = configPolicyName
+	sched, err := app.CreateScheduler(ss, clientSet,
+		informerFactory.Core().V1().Nodes(),
+		informerFactory.Core().V1().PersistentVolumes(),
+		informerFactory.Core().V1().PersistentVolumeClaims(),
+		informerFactory.Core().V1().ReplicationControllers(),
+		informerFactory.Extensions().V1beta1().ReplicaSets(),
+		informerFactory.Apps().V1beta1().StatefulSets(),
+		informerFactory.Core().V1().Services(),
+		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+	)
+
+	if err != nil {
+		t.Fatalf("Error creating scheduler: %v", err)
+	}
+
+	// Verify that the config is applied correctly.
+	schedPredicates := sched.Config().Algorithm.Predicates()
+	schedPrioritizers := sched.Config().Algorithm.Prioritizers()
+	if len(schedPredicates) != 2 || len(schedPrioritizers) != 2 {
+		t.Errorf("Unexpected number of predicates or priority functions. Number of predicates: %v, number of prioritizers: %v", len(schedPredicates), len(schedPrioritizers))
+	}
+	// Check a predicate and a priority function.
+	if schedPredicates["PredicateTwo"] == nil {
+		t.Errorf("Expected to have a PredicateTwo predicate.")
+	}
+	if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
+		t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
+	}
+
+	defer close(sched.Config().StopEverything)
+}
+
+// TestSchedulerCreationFromNonExistentConfigMap ensures that creation of the
+// scheduler from a non-existent ConfigMap fails.
+func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
+	_, s := framework.RunAMaster(nil)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("configmap", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+
+	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+
+	ss := options.NewSchedulerServer()
+	ss.PolicyConfigMapName = "non-existent-config"
+
+	_, err := app.CreateScheduler(ss, clientSet,
+		informerFactory.Core().V1().Nodes(),
+		informerFactory.Core().V1().PersistentVolumes(),
+		informerFactory.Core().V1().PersistentVolumeClaims(),
+		informerFactory.Core().V1().ReplicationControllers(),
+		informerFactory.Extensions().V1beta1().ReplicaSets(),
+		informerFactory.Apps().V1beta1().StatefulSets(),
+		informerFactory.Core().V1().Services(),
+		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+	)
+
+	if err == nil {
+		t.Fatalf("Creation of scheduler did not fail even though the policy ConfigMap did not exist.")
+	}
+}
+
+// TestSchedulerCreationInLegacyMode ensures that creation of the scheduler
+// works fine when legacy mode is enabled.
+func TestSchedulerCreationInLegacyMode(t *testing.T) {
+	_, s := framework.RunAMaster(nil)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("configmap", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+	informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
+
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
+
+	ss := options.NewSchedulerServer()
+	ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
+	ss.PolicyConfigMapName = "non-existent-configmap"
+	ss.UseLegacyPolicyConfig = true
+
+	sched, err := app.CreateScheduler(ss, clientSet,
+		informerFactory.Core().V1().Nodes(),
+		informerFactory.Core().V1().PersistentVolumes(),
+		informerFactory.Core().V1().PersistentVolumeClaims(),
+		informerFactory.Core().V1().ReplicationControllers(),
+		informerFactory.Extensions().V1beta1().ReplicaSets(),
+		informerFactory.Apps().V1beta1().StatefulSets(),
+		informerFactory.Core().V1().Services(),
+		eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
+	)
+
+	if err != nil {
+		t.Fatalf("Creation of scheduler in legacy mode failed: %v", err)
+	}
+
+	informerFactory.Start(sched.Config().StopEverything)
+	defer close(sched.Config().StopEverything)
+
+	sched.Run()
+	DoTestUnschedulableNodes(t, clientSet, ns, informerFactory.Core().V1().Nodes().Lister())
+}
+
 func TestUnschedulableNodes(t *testing.T) {
 	_, s := framework.RunAMaster(nil)
 	defer s.Close()