optionally ignore preferred terms of existing pods unless incoming pod has inter-pod affinities
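
For context, the option this commit adds, ignorePreferredTermsOfExistingPods, is a field of the InterPodAffinity plugin's args and is set per scheduler profile. A minimal configuration sketch, assuming the same API version and field names as the test fixture in the diff below (the profile name is a placeholder):

    apiVersion: kubescheduler.config.k8s.io/v1
    kind: KubeSchedulerConfiguration
    profiles:
    - schedulerName: high-throughput-profile
      pluginConfig:
      - name: InterPodAffinity
        args:
          # Per the commit message: skip scoring the preferred inter-pod
          # affinity terms of pods already running unless the incoming pod
          # itself declares inter-pod affinities.
          ignorePreferredTermsOfExistingPods: true

On clusters where most incoming pods declare no affinities, this presumably trims PreScore work for the InterPodAffinity plugin, which is the throughput gain the profile's name alludes to.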
@@ -389,6 +389,27 @@ profiles:
         t.Fatal(err)
     }
+
+    // high throughput profile config
+    highThroughputProfileConfig := filepath.Join(tmpDir, "high-throughput.yaml")
+    if err := os.WriteFile(highThroughputProfileConfig, []byte(fmt.Sprintf(`
+apiVersion: kubescheduler.config.k8s.io/v1
+kind: KubeSchedulerConfiguration
+clientConnection:
+  kubeconfig: '%s'
+profiles:
+- schedulerName: "high-throughput-profile"
+  plugins:
+    preScore:
+      enabled:
+      - name: InterPodAffinity
+  pluginConfig:
+  - name: InterPodAffinity
+    args:
+      ignorePreferredTermsOfExistingPods: true
+`, configKubeconfig)), os.FileMode(0600)); err != nil {
+        t.Fatal(err)
+    }

     // Insulate this test from picking up in-cluster config when run inside a pod
     // We can't assume we have permissions to write to /var/run/secrets/... from a unit test to mock in-cluster config for testing
     originalHost := os.Getenv("KUBERNETES_SERVICE_HOST")
@@ -1525,6 +1546,110 @@ profiles:
             expectedError: `key "leaderElect" already set`,
             checkErrFn:    runtime.IsStrictDecodingError,
         },
+        {
+            name: "high throughput profile",
+            options: &Options{
+                ConfigFile: highThroughputProfileConfig,
+                Logs:       logs.NewOptions(),
+            },
+            expectedUsername: "config",
+            expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
+                TypeMeta: metav1.TypeMeta{
+                    APIVersion: v1.SchemeGroupVersion.String(),
+                },
+                Parallelism: 16,
+                DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
+                    EnableProfiling:           true,
+                    EnableContentionProfiling: true,
+                },
+                LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+                    LeaderElect:       true,
+                    LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+                    RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+                    RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+                    ResourceLock:      "leases",
+                    ResourceNamespace: "kube-system",
+                    ResourceName:      "kube-scheduler",
+                },
+                ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
+                    Kubeconfig:  configKubeconfig,
+                    QPS:         50,
+                    Burst:       100,
+                    ContentType: "application/vnd.kubernetes.protobuf",
+                },
+                PercentageOfNodesToScore: defaultPercentageOfNodesToScore,
+                PodInitialBackoffSeconds: defaultPodInitialBackoffSeconds,
+                PodMaxBackoffSeconds:     defaultPodMaxBackoffSeconds,
+                Profiles: []kubeschedulerconfig.KubeSchedulerProfile{
+                    {
+                        SchedulerName: "high-throughput-profile",
+                        Plugins: &kubeschedulerconfig.Plugins{
+                            QueueSort:  defaults.PluginsV1.QueueSort,
+                            PreFilter:  defaults.PluginsV1.PreFilter,
+                            Filter:     defaults.PluginsV1.Filter,
+                            PostFilter: defaults.PluginsV1.PostFilter,
+                            PreScore: kubeschedulerconfig.PluginSet{
+                                Enabled: []kubeschedulerconfig.Plugin{
+                                    {Name: "InterPodAffinity"},
+                                },
+                            },
+                            Score:      defaults.PluginsV1.Score,
+                            Bind:       defaults.PluginsV1.Bind,
+                            PreBind:    defaults.PluginsV1.PreBind,
+                            Reserve:    defaults.PluginsV1.Reserve,
+                            MultiPoint: defaults.PluginsV1.MultiPoint,
+                        },
+                        PluginConfig: []kubeschedulerconfig.PluginConfig{
+                            {
+                                Name: "InterPodAffinity",
+                                Args: &kubeschedulerconfig.InterPodAffinityArgs{
+                                    HardPodAffinityWeight:              1,
+                                    IgnorePreferredTermsOfExistingPods: true,
+                                },
+                            },
+                            {
+                                Name: "DefaultPreemption",
+                                Args: &kubeschedulerconfig.DefaultPreemptionArgs{
+                                    MinCandidateNodesPercentage: 10,
+                                    MinCandidateNodesAbsolute:   100,
+                                },
+                            },
+                            {
+                                Name: "NodeAffinity",
+                                Args: &kubeschedulerconfig.NodeAffinityArgs{},
+                            },
+                            {
+                                Name: "NodeResourcesBalancedAllocation",
+                                Args: &kubeschedulerconfig.NodeResourcesBalancedAllocationArgs{
+                                    Resources: []kubeschedulerconfig.ResourceSpec{{Name: "cpu", Weight: 1}, {Name: "memory", Weight: 1}},
+                                },
+                            },
+                            {
+                                Name: "NodeResourcesFit",
+                                Args: &kubeschedulerconfig.NodeResourcesFitArgs{
+                                    ScoringStrategy: &kubeschedulerconfig.ScoringStrategy{
+                                        Type:      kubeschedulerconfig.LeastAllocated,
+                                        Resources: []kubeschedulerconfig.ResourceSpec{{Name: "cpu", Weight: 1}, {Name: "memory", Weight: 1}},
+                                    },
+                                },
+                            },
+                            {
+                                Name: "PodTopologySpread",
+                                Args: &kubeschedulerconfig.PodTopologySpreadArgs{
+                                    DefaultingType: kubeschedulerconfig.SystemDefaulting,
+                                },
+                            },
+                            {
+                                Name: "VolumeBinding",
+                                Args: &kubeschedulerconfig.VolumeBindingArgs{
+                                    BindTimeoutSeconds: 600,
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
     }

     for _, tc := range testcases {
@@ -1550,7 +1675,7 @@ profiles:
             }
             return
         }
-        t.Errorf("unexpected error to create a config: %v", err)
+        t.Errorf("unexpected error creating config: %v", err)
         return
     }