Merge pull request #43892 from bsalamat/sched_conf1

Automatic merge from submit-queue

Scheduler can receive its policy configuration from a ConfigMap

**What this PR does / why we need it**: This PR adds the ability for the scheduler to receive its policy configuration from a ConfigMap. Before this change, the scheduler could read its policy config only from a file. The logic to watch the ConfigMap object will be added in a subsequent PR.
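
As an illustration of the intended workflow (not part of this PR; the ConfigMap name and data key are hypothetical), such a policy ConfigMap could be created with kubectl before the scheduler starts:

```
# The scheduler expects exactly one entry under the ConfigMap's data.
kubectl create configmap scheduler-policy --namespace=kube-system \
    --from-file=policy.cfg=/etc/kubernetes/scheduler-policy.json
```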

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```
Add the ability for the default scheduler to receive its policy configuration from a ConfigMap object.
```
Kubernetes Submit Queue 2017-04-08 23:19:43 -07:00 committed by GitHub
commit 6e3bd081d5
14 changed files with 348 additions and 23 deletions

View File

@@ -521,6 +521,8 @@ pod-running
pod-running-timeout
pods-per-core
policy-config-file
policy-configmap
policy-configmap-namespace
poll-interval
portal-net
prepull-images
@@ -683,6 +685,7 @@ upgrade-image
upgrade-target
use-kubernetes-cluster-service
use-kubernetes-version
use-legacy-policy-config
use-service-account-credentials
user-whitelist
use-service-account-credentials

View File

@@ -599,6 +599,19 @@ type KubeSchedulerConfiguration struct {
LockObjectNamespace string
// LockObjectName defines the lock object name
LockObjectName string
// PolicyConfigMapName is the name of the ConfigMap object that specifies
// the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler
// uses PolicyConfigFile. If UseLegacyPolicyConfig is false and
// PolicyConfigMapName is not empty, the ConfigMap object with this name must
// exist in PolicyConfigMapNamespace before scheduler initialization.
PolicyConfigMapName string
// PolicyConfigMapNamespace is the namespace where the above policy config map
// is located. If none is provided, the default system namespace ("kube-system")
// will be used.
PolicyConfigMapNamespace string
// UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and
// to use PolicyConfigFile if available.
UseLegacyPolicyConfig bool
}
// LeaderElectionConfiguration defines the configuration of leader election
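
The selection order implied by these field comments can be summarized in a small standalone sketch (the function and variable names here are hypothetical; the real logic lives in schedulerConfigurator.getSchedulerPolicyConfig later in this PR):

```go
package main

import "fmt"

// policySource sketches where the scheduler looks for its policy, per the
// comments above: the ConfigMap wins unless legacy mode is enabled or no
// ConfigMap name is set, in which case the policy file (if any) is used,
// and otherwise the scheduler falls back to its algorithm provider.
func policySource(useLegacyPolicyConfig bool, policyConfigMapName, policyConfigFile string) string {
	if !useLegacyPolicyConfig && policyConfigMapName != "" {
		return "ConfigMap " + policyConfigMapName
	}
	if policyConfigFile != "" {
		return "policy file " + policyConfigFile
	}
	return "algorithm provider defaults"
}

func main() {
	fmt.Println(policySource(false, "scheduler-policy", ""))          // ConfigMap scheduler-policy
	fmt.Println(policySource(true, "scheduler-policy", "policy.cfg")) // policy file policy.cfg
}
```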

View File

@@ -164,6 +164,9 @@ func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
if obj.LockObjectName == "" {
obj.LockObjectName = SchedulerDefaultLockObjectName
}
if obj.PolicyConfigMapNamespace == "" {
obj.PolicyConfigMapNamespace = api.NamespaceSystem
}
}
func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {

View File

@@ -134,6 +134,19 @@ type KubeSchedulerConfiguration struct {
LockObjectNamespace string `json:"lockObjectNamespace"`
// LockObjectName defines the lock object name
LockObjectName string `json:"lockObjectName"`
// PolicyConfigMapName is the name of the ConfigMap object that specifies
// the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler
// uses PolicyConfigFile. If UseLegacyPolicyConfig is false and
// PolicyConfigMapName is not empty, the ConfigMap object with this name must
// exist in PolicyConfigMapNamespace before scheduler initialization.
PolicyConfigMapName string `json:"policyConfigMapName"`
// PolicyConfigMapNamespace is the namespace where the above policy config map
// is located. If none is provided, the default system namespace ("kube-system")
// will be used.
PolicyConfigMapNamespace string `json:"policyConfigMapNamespace"`
// UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and
// to use PolicyConfigFile if available.
UseLegacyPolicyConfig bool `json:"useLegacyPolicyConfig"`
}
// HairpinMode denotes how the kubelet should configure networking to handle
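
Given the JSON tags above, the new fields would appear in a serialized v1alpha1 KubeSchedulerConfiguration roughly as follows (the values are illustrative, not defaults from this PR):

```json
{
  "policyConfigMapName": "scheduler-policy",
  "policyConfigMapNamespace": "kube-system",
  "useLegacyPolicyConfig": false
}
```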

View File

@@ -139,6 +139,9 @@ func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSche
}
out.LockObjectNamespace = in.LockObjectNamespace
out.LockObjectName = in.LockObjectName
out.PolicyConfigMapName = in.PolicyConfigMapName
out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace
out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig
return nil
}
@@ -166,6 +169,9 @@ func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSche
}
out.LockObjectNamespace = in.LockObjectNamespace
out.LockObjectName = in.LockObjectName
out.PolicyConfigMapName = in.PolicyConfigMapName
out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace
out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig
return nil
}

View File

@@ -13050,8 +13050,29 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope
Format: "",
},
},
"policyConfigMapName": {
SchemaProps: spec.SchemaProps{
Description: "PolicyConfigMapName is the name of the ConfigMap object that specifies the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler uses PolicyConfigFile. If UseLegacyPolicyConfig is false and PolicyConfigMapName is not empty, the ConfigMap object with this name must exist in PolicyConfigMapNamespace before scheduler initialization.",
Type: []string{"string"},
Format: "",
},
},
"policyConfigMapNamespace": {
SchemaProps: spec.SchemaProps{
Description: "PolicyConfigMapNamespace is the namespace where the above policy config map is located. If none is provided default system namespace (\"kube-system\") will be used.",
Type: []string{"string"},
Format: "",
},
},
"useLegacyPolicyConfig": {
SchemaProps: spec.SchemaProps{
Description: "UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and to use PolicyConfigFile if available.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"port", "address", "algorithmProvider", "policyConfigFile", "enableProfiling", "enableContentionProfiling", "contentType", "kubeAPIQPS", "kubeAPIBurst", "schedulerName", "hardPodAffinitySymmetricWeight", "failureDomains", "leaderElection", "lockObjectNamespace", "lockObjectName"},
Required: []string{"port", "address", "algorithmProvider", "policyConfigFile", "enableProfiling", "enableContentionProfiling", "contentType", "kubeAPIQPS", "kubeAPIBurst", "schedulerName", "hardPodAffinitySymmetricWeight", "failureDomains", "leaderElection", "lockObjectNamespace", "lockObjectName", "policyConfigMapName", "policyConfigMapNamespace", "useLegacyPolicyConfig"},
},
},
Dependencies: []string{

View File

@@ -26,6 +26,7 @@ import (
extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1"
"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -71,8 +72,8 @@ func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
return cli, nil
}
// createScheduler encapsulates the entire creation of a runnable scheduler.
func createScheduler(
// CreateScheduler encapsulates the entire creation of a runnable scheduler.
func CreateScheduler(
s *options.SchedulerServer,
kubecli *clientset.Clientset,
nodeInformer coreinformers.NodeInformer,
@@ -101,38 +102,91 @@ func createScheduler(
configurator = &schedulerConfigurator{
configurator,
s.PolicyConfigFile,
s.AlgorithmProvider}
s.AlgorithmProvider,
s.PolicyConfigMapName,
s.PolicyConfigMapNamespace,
s.UseLegacyPolicyConfig,
}
return scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) {
cfg.Recorder = recorder
})
}
// schedulerConfigurator is an interface wrapper that provides default Configuration creation based on user
// provided config file.
// schedulerConfigurator is an interface wrapper that provides a way to create
// a scheduler from a user provided config file or ConfigMap object.
type schedulerConfigurator struct {
scheduler.Configurator
policyFile string
algorithmProvider string
policyFile string
algorithmProvider string
policyConfigMap string
policyConfigMapNamespace string
useLegacyPolicyConfig bool
}
// Create implements the interface for the Configurator, hence it is exported even through the struct is not.
// getSchedulerPolicyConfig finds and decodes scheduler's policy config. If no
// such policy is found, it returns nil, nil.
func (sc schedulerConfigurator) getSchedulerPolicyConfig() (*schedulerapi.Policy, error) {
var configData []byte
var policyConfigMapFound bool
var policy schedulerapi.Policy
// If not in legacy mode, try to find policy ConfigMap.
if !sc.useLegacyPolicyConfig && len(sc.policyConfigMap) != 0 {
namespace := sc.policyConfigMapNamespace
policyConfigMap, err := sc.GetClient().CoreV1().ConfigMaps(namespace).Get(sc.policyConfigMap, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting scheduler policy ConfigMap: %v.", err)
}
if policyConfigMap != nil {
// We expect the single entry in the Data member of the ConfigMap to
// contain the policy config.
if len(policyConfigMap.Data) != 1 {
return nil, fmt.Errorf("ConfigMap %v has %v entries in its 'Data'. It must have only one.", sc.policyConfigMap, len(policyConfigMap.Data))
}
policyConfigMapFound = true
// This loop should iterate only once, as we have already checked the length of Data.
for _, val := range policyConfigMap.Data {
glog.V(5).Infof("Scheduler policy ConfigMap: %v", val)
configData = []byte(val)
}
}
}
// If we are in legacy mode or ConfigMap name is empty, try to use policy
// config file.
if !policyConfigMapFound {
if _, err := os.Stat(sc.policyFile); err != nil {
// No config file is found.
return nil, nil
}
var err error
configData, err = ioutil.ReadFile(sc.policyFile)
if err != nil {
return nil, fmt.Errorf("unable to read policy config: %v", err)
}
}
if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
return nil, fmt.Errorf("invalid configuration: %v", err)
}
return &policy, nil
}
// Create implements the interface for the Configurator, hence it is exported
// even though the struct is not.
func (sc schedulerConfigurator) Create() (*scheduler.Config, error) {
if _, err := os.Stat(sc.policyFile); err != nil {
policy, err := sc.getSchedulerPolicyConfig()
if err != nil {
return nil, err
}
// If no policy is found, create scheduler from algorithm provider.
if policy == nil {
if sc.Configurator != nil {
return sc.Configurator.CreateFromProvider(sc.algorithmProvider)
}
return nil, fmt.Errorf("Configurator was nil")
}
// policy file is valid, try to create a configuration from it.
var policy schedulerapi.Policy
configData, err := ioutil.ReadFile(sc.policyFile)
if err != nil {
return nil, fmt.Errorf("unable to read policy config: %v", err)
}
if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
return nil, fmt.Errorf("invalid configuration: %v", err)
}
return sc.CreateFromConfig(policy)
return sc.CreateFromConfig(*policy)
}

View File

@@ -63,7 +63,10 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on")
fs.StringVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.AlgorithmProvider, "algorithm-provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders())
fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration")
fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true")
fs.StringVar(&s.PolicyConfigMapName, "policy-configmap", s.PolicyConfigMapName, "Name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config==false")
fs.StringVar(&s.PolicyConfigMapNamespace, "policy-configmap-namespace", s.PolicyConfigMapNamespace, "The namespace where policy ConfigMap is located. The system namespace will be used if this is not provided or is empty.")
fs.BoolVar(&s.UseLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and use policy config file")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
@@ -80,6 +83,5 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
fs.MarkDeprecated("failure-domains", "Doesn't have any effect. Will be removed in future version.")
leaderelection.BindFlags(&s.LeaderElection, fs)
utilfeature.DefaultFeatureGate.AddFlag(fs)
}
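
Taken together, the two modes these flags enable might be exercised as follows (illustrative invocations; the ConfigMap and file names are hypothetical):

```
# ConfigMap mode (default): read the policy from a ConfigMap in the system namespace.
kube-scheduler --policy-configmap=scheduler-policy

# Legacy mode: ignore any policy ConfigMap and read the policy from a file.
kube-scheduler --use-legacy-policy-config=true --policy-config-file=/etc/kubernetes/scheduler-policy.json
```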

View File

@@ -73,7 +73,7 @@ func Run(s *options.SchedulerServer) error {
informerFactory := informers.NewSharedInformerFactory(kubecli, 0)
sched, err := createScheduler(
sched, err := CreateScheduler(
s,
kubecli,
informerFactory.Core().V1().Nodes(),

View File

@@ -41,4 +41,10 @@ type SchedulerExtender interface {
// onto machines.
type ScheduleAlgorithm interface {
Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error)
// Predicates() returns a map of predicate functions. This is
// exposed for testing.
Predicates() map[string]FitPredicate
// Prioritizers returns a slice of priority config. This is exposed for
// testing.
Prioritizers() []PriorityConfig
}

View File

@@ -127,6 +127,18 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
return g.selectHost(priorityList)
}
// Prioritizers returns a slice containing all the scheduler's priority
// functions and their config. It is exposed for testing only.
func (g *genericScheduler) Prioritizers() []algorithm.PriorityConfig {
return g.prioritizers
}
// Predicates returns a map containing all the scheduler's predicate
// functions. It is exposed for testing only.
func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate {
return g.predicates
}
// selectHost takes a prioritized list of nodes and then picks one
// in a round-robin manner from the nodes that had the highest score.
func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {

View File

@@ -155,6 +155,11 @@ func (sched *Scheduler) Run() {
go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything)
}
// Config returns scheduler's config pointer. It is exposed for testing purposes.
func (sched *Scheduler) Config() *Config {
return sched.config
}
func (sched *Scheduler) scheduleOne() {
pod := sched.config.NextPod()
if pod.DeletionTimestamp != nil {

View File

@@ -97,6 +97,13 @@ func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string,
return es.machine, es.err
}
func (es mockScheduler) Predicates() map[string]algorithm.FitPredicate {
return nil
}
func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig {
return nil
}
func TestScheduler(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(t.Logf).Stop()

View File

@@ -39,9 +39,14 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -53,6 +58,181 @@ type nodeStateManager struct {
makeUnSchedulable nodeMutationFunc
}
func PredicateOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil
}
func PredicateTwo(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil
}
func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil
}
func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil
}
// TestSchedulerCreationFromConfigMap verifies that scheduler can be created
// from configurations provided by a ConfigMap object and then verifies that the
// configuration is applied correctly.
func TestSchedulerCreationFromConfigMap(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
// Pre-register some predicate and priority functions
factory.RegisterFitPredicate("PredicateOne", PredicateOne)
factory.RegisterFitPredicate("PredicateTwo", PredicateTwo)
factory.RegisterPriorityFunction("PriorityOne", PriorityOne, 1)
factory.RegisterPriorityFunction("PriorityTwo", PriorityTwo, 1)
// Add a ConfigMap object.
configPolicyName := "scheduler-custom-policy-config"
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{
"scheduler-policy-config.json": `{
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
{"name" : "PredicateOne"},
{"name" : "PredicateTwo"}
],
"priorities" : [
{"name" : "PriorityOne", "weight" : 1},
{"name" : "PriorityTwo", "weight" : 5}
]
}`,
},
}
policyConfigMap.APIVersion = api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()
clientSet.Core().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
ss := options.NewSchedulerServer()
ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
ss.PolicyConfigMapName = configPolicyName
sched, err := app.CreateScheduler(ss, clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
)
if err != nil {
t.Fatalf("Error creating scheduler: %v", err)
}
// Verify that the config is applied correctly.
schedPredicates := sched.Config().Algorithm.Predicates()
schedPrioritizers := sched.Config().Algorithm.Prioritizers()
if len(schedPredicates) != 2 || len(schedPrioritizers) != 2 {
t.Errorf("Unexpected number of predicates or priority functions. Number of predicates: %v, number of prioritizers: %v", len(schedPredicates), len(schedPrioritizers))
}
// Check a predicate and a priority function.
if schedPredicates["PredicateTwo"] == nil {
t.Errorf("Expected to have a PodFitsHostPorts predicate.")
}
if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
}
defer close(sched.Config().StopEverything)
}
// TestSchedulerCreationFromNonExistentConfigMap ensures that creation of the
// scheduler from a non-existent ConfigMap fails.
func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
ss := options.NewSchedulerServer()
ss.PolicyConfigMapName = "non-existent-config"
_, err := app.CreateScheduler(ss, clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
)
if err == nil {
t.Fatalf("Creation of scheduler didn't fail while the policy ConfigMap didn't exist.")
}
}
// TestSchedulerCreationInLegacyMode ensures that creation of the scheduler
// works fine when legacy mode is enabled.
func TestSchedulerCreationInLegacyMode(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("configmap", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
ss := options.NewSchedulerServer()
ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
ss.PolicyConfigMapName = "non-existent-configmap"
ss.UseLegacyPolicyConfig = true
sched, err := app.CreateScheduler(ss, clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
)
if err != nil {
t.Fatalf("Creation of scheduler in legacy mode failed: %v", err)
}
informerFactory.Start(sched.Config().StopEverything)
defer close(sched.Config().StopEverything)
sched.Run()
DoTestUnschedulableNodes(t, clientSet, ns, informerFactory.Core().V1().Nodes().Lister())
}
func TestUnschedulableNodes(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()