Merge pull request #21276 from mesosphere/jdef_host_port_endpoints_refactor

Auto commit by PR queue bot

This commit is contained in commit eb44520f5f.
@@ -137,7 +137,8 @@ Host ports that are not defined, or else defined as zero, will automatically be
 
 To disable the work-around and revert to vanilla Kubernetes service endpoint termination:
 
-- execute the k8sm controller-manager with `-host_port_endpoints=false`;
+- execute the k8sm scheduler with `-host-port-endpoints=false`
+- execute the k8sm controller-manager with `-host-port-endpoints=false`
 
 Then the usual Kubernetes network assumptions must be fulfilled for Kubernetes to work with Mesos, i.e. each container must get a cluster-wide routable IP (compare [Kubernetes Networking documentation](../../../docs/design/networking.md#container-to-container)).
 
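For illustration only, a minimal self-contained sketch of how a boolean `host-port-endpoints` flag is typically registered with pflag; the `config` struct and `main` scaffolding are hypothetical, and only the flag name and its meaning mirror this PR:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// config stands in for the real CMServer/SchedulerServer option structs.
type config struct {
	UseHostPortEndpoints bool
}

func main() {
	c := config{UseHostPortEndpoints: true} // work-around enabled by default
	fs := pflag.NewFlagSet("k8sm", pflag.ExitOnError)
	fs.BoolVar(&c.UseHostPortEndpoints, "host-port-endpoints", c.UseHostPortEndpoints,
		"Map service endpoints to hostIP:hostPort instead of podIP:containerPort.")
	// e.g. revert to vanilla Kubernetes service endpoint termination:
	fs.Parse([]string{"--host-port-endpoints=false"})
	fmt.Println(c.UseHostPortEndpoints) // false
}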
@@ -84,7 +84,7 @@ func NewCMServer() *CMServer {
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	s.CMServer.AddFlags(fs)
-	fs.BoolVar(&s.UseHostPortEndpoints, "host_port_endpoints", s.UseHostPortEndpoints, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
+	fs.BoolVar(&s.UseHostPortEndpoints, "host-port-endpoints", s.UseHostPortEndpoints, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
 }
 
 func (s *CMServer) resyncPeriod() time.Duration {
@@ -34,6 +34,7 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/podutil"
 	kmruntime "k8s.io/kubernetes/contrib/mesos/pkg/runtime"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
@@ -197,11 +198,11 @@ func TestExecutorLaunchAndKillTask(t *testing.T) {
 
 	podTask, err := podtask.New(
 		api.NewDefaultContext(),
-		"",
+		podtask.Config{
+			Prototype:        executorinfo,
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		pod,
-		executorinfo,
-		nil,
-		nil,
 	)
 	assert.Equal(t, nil, err, "must be able to create a task from a pod")
 
@@ -407,11 +408,12 @@ func TestExecutorFrameworkMessage(t *testing.T) {
 	executorinfo := &mesosproto.ExecutorInfo{}
 	podTask, _ := podtask.New(
 		api.NewDefaultContext(),
-		"foo",
+		podtask.Config{
+			ID:               "foo",
+			Prototype:        executorinfo,
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		pod,
-		executorinfo,
-		nil,
-		nil,
 	)
 	pod.Annotations = map[string]string{
 		"k8s.mesosphere.io/taskId": podTask.ID,
@@ -20,14 +20,13 @@ import (
 	"fmt"
 
 	log "github.com/golang/glog"
-	"github.com/mesos/mesos-go/mesosproto"
 	"k8s.io/kubernetes/contrib/mesos/pkg/offers"
 	"k8s.io/kubernetes/contrib/mesos/pkg/queue"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/components/algorithm/podschedulers"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/errors"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
 )
@@ -42,14 +41,12 @@ type SchedulerAlgorithm interface {
 
 // SchedulerAlgorithm implements the algorithm.ScheduleAlgorithm interface
 type schedulerAlgorithm struct {
 	sched        scheduler.Scheduler
 	podUpdates   queue.FIFO
 	podScheduler podschedulers.PodScheduler
-	prototype       *mesosproto.ExecutorInfo
-	frameworkRoles  []string
-	defaultPodRoles []string
-	defaultCpus     mresource.CPUShares
-	defaultMem      mresource.MegaBytes
+	taskConfig   podtask.Config
+	defaultCpus  resources.CPUShares
+	defaultMem   resources.MegaBytes
 }
 
 // New returns a new SchedulerAlgorithm
@@ -58,20 +55,17 @@ func New(
 	sched scheduler.Scheduler,
 	podUpdates queue.FIFO,
 	podScheduler podschedulers.PodScheduler,
-	prototype *mesosproto.ExecutorInfo,
-	frameworkRoles, defaultPodRoles []string,
-	defaultCpus mresource.CPUShares,
-	defaultMem mresource.MegaBytes,
+	taskConfig podtask.Config,
+	defaultCpus resources.CPUShares,
+	defaultMem resources.MegaBytes,
 ) SchedulerAlgorithm {
 	return &schedulerAlgorithm{
 		sched:        sched,
 		podUpdates:   podUpdates,
 		podScheduler: podScheduler,
-		frameworkRoles:  frameworkRoles,
-		defaultPodRoles: defaultPodRoles,
-		prototype:       prototype,
-		defaultCpus:     defaultCpus,
-		defaultMem:      defaultMem,
+		taskConfig:   taskConfig,
+		defaultCpus:  defaultCpus,
+		defaultMem:   defaultMem,
 	}
 }
 
@@ -109,7 +103,7 @@ func (k *schedulerAlgorithm) Schedule(pod *api.Pod) (string, error) {
 	// From here on we can expect that the pod spec of a task has proper limits for CPU and memory.
 	k.limitPod(pod)
 
-	podTask, err := podtask.New(ctx, "", pod, k.prototype, k.frameworkRoles, k.defaultPodRoles)
+	podTask, err := podtask.New(ctx, k.taskConfig, pod)
 	if err != nil {
 		log.Warningf("aborting Schedule, unable to create podtask object %+v: %v", pod, err)
 		return "", err
@@ -146,12 +140,12 @@ func (k *schedulerAlgorithm) Schedule(pod *api.Pod) (string, error) {
 
 // limitPod limits the given pod based on the scheduler's default limits.
 func (k *schedulerAlgorithm) limitPod(pod *api.Pod) error {
-	cpuRequest, cpuLimit, _, err := mresource.LimitPodCPU(pod, k.defaultCpus)
+	cpuRequest, cpuLimit, _, err := resources.LimitPodCPU(pod, k.defaultCpus)
	if err != nil {
 		return err
 	}
 
-	memRequest, memLimit, _, err := mresource.LimitPodMem(pod, k.defaultMem)
+	memRequest, memLimit, _, err := resources.LimitPodMem(pod, k.defaultMem)
 	if err != nil {
 		return err
 	}
@@ -25,6 +25,7 @@ import (
 	types "k8s.io/kubernetes/contrib/mesos/pkg/scheduler"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/errors"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/queuer"
 	"k8s.io/kubernetes/pkg/api"
 )
@@ -63,11 +64,12 @@ func TestDeleteOne_PendingPod(t *testing.T) {
 	}}}
 	task, err := podtask.New(
 		api.NewDefaultContext(),
-		"bar",
+		podtask.Config{
+			ID:               "bar",
+			Prototype:        &mesosproto.ExecutorInfo{},
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		pod.Pod,
-		&mesosproto.ExecutorInfo{},
-		nil,
-		nil,
 	)
 	if err != nil {
 		t.Fatalf("failed to create task: %v", err)
@@ -110,11 +112,12 @@ func TestDeleteOne_Running(t *testing.T) {
 	}}}
 	task, err := podtask.New(
 		api.NewDefaultContext(),
-		"bar",
+		podtask.Config{
+			ID:               "bar",
+			Prototype:        &mesosproto.ExecutorInfo{},
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		pod.Pod,
-		&mesosproto.ExecutorInfo{},
-		nil,
-		nil,
 	)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
@@ -39,7 +39,7 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/config"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/queuer"
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -65,10 +65,9 @@ func New(
 	terminate <-chan struct{},
 	mux *http.ServeMux,
 	lw *cache.ListWatch,
-	prototype *mesos.ExecutorInfo,
-	frameworkRoles, defaultPodRoles []string,
-	defaultCpus mresource.CPUShares,
-	defaultMem mresource.MegaBytes,
+	taskConfig podtask.Config,
+	defaultCpus resources.CPUShares,
+	defaultMem resources.MegaBytes,
 ) scheduler.Scheduler {
 	core := &sched{
 		framework: fw,
@@ -82,7 +81,7 @@ func New(
 
 	q := queuer.New(queue.NewDelayFIFO(), podUpdates)
 
-	algorithm := algorithm.New(core, podUpdates, ps, prototype, frameworkRoles, defaultPodRoles, defaultCpus, defaultMem)
+	algorithm := algorithm.New(core, podUpdates, ps, taskConfig, defaultCpus, defaultMem)
 
 	podDeleter := deleter.New(core, q)
 
@@ -43,7 +43,8 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/ha"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
@@ -524,11 +525,14 @@ func newLifecycleTest(t *testing.T) lifecycleTest {
 		schedulerProc.Terminal(),
 		http.DefaultServeMux,
 		&podsListWatch.ListWatch,
-		ei,
-		[]string{"*"},
-		[]string{"*"},
-		mresource.DefaultDefaultContainerCPULimit,
-		mresource.DefaultDefaultContainerMemLimit,
+		podtask.Config{
+			Prototype:        ei,
+			FrameworkRoles:   []string{"*"},
+			DefaultPodRoles:  []string{"*"},
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
+		resources.DefaultDefaultContainerCPULimit,
+		resources.DefaultDefaultContainerMemLimit,
 	)
 	assert.NotNil(scheduler)
 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package podtask
+package hostport
 
 import (
 	"fmt"
@@ -22,38 +22,30 @@ import (
 	log "github.com/golang/glog"
 	mesos "github.com/mesos/mesos-go/mesosproto"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/labels"
 )
 
-const (
-	// maps a Container.HostPort to the same exact offered host port, ignores .HostPort = 0
-	HostPortMappingFixed = "fixed"
-	// same as HostPortMappingFixed, except that .HostPort of 0 are mapped to any port offered
-	HostPortMappingWildcard = "wildcard"
-)
-
-// Objects implementing the HostPortMapper interface generate port mappings
+// Objects implementing the Mapper interface generate port mappings
 // from k8s container ports to ports offered by mesos
-type HostPortMapper interface {
-	// Map maps the given pod task and the given mesos offer
-	// and returns a slice of port mappings
-	// or an error if the mapping failed
-	Map(t *T, offer *mesos.Offer) ([]HostPortMapping, error)
+type Mapper interface {
+	// Map maps the given pod and the given mesos offer and returns a
+	// slice of port mappings or an error if the mapping failed
+	Map(pod *api.Pod, roles []string, offer *mesos.Offer) ([]Mapping, error)
 }
 
-// HostPortMapperFunc is a function adapter to the HostPortMapper interface
-type HostPortMapperFunc func(*T, *mesos.Offer) ([]HostPortMapping, error)
+// MapperFunc is a function adapter to the Mapper interface
+type MapperFunc func(*api.Pod, []string, *mesos.Offer) ([]Mapping, error)
 
 // Map calls f(t, offer)
-func (f HostPortMapperFunc) Map(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
-	return f(t, offer)
+func (f MapperFunc) Map(pod *api.Pod, roles []string, offer *mesos.Offer) ([]Mapping, error) {
+	return f(pod, roles, offer)
 }
 
-// A HostPortMapping represents the mapping between k8s container ports
+// A Mapping represents the mapping between k8s container ports
 // ports offered by mesos. It references the k8s' container and port
 // and specifies the offered mesos port and the offered port's role
-type HostPortMapping struct {
+type Mapping struct {
 	ContainerIdx int    // index of the container in the pod spec
 	PortIdx      int    // index of the port in a container's port spec
 	OfferPort    uint64 // the port offered by mesos
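The interface/adapter pair above follows the same pattern as net/http's Handler and HandlerFunc. Below is a self-contained sketch with simplified, hypothetical types (plain strings and uint64 slices instead of *api.Pod and *mesos.Offer) showing how a MapperFunc lets a bare function satisfy the Mapper interface:

package main

import "fmt"

type Mapping struct {
	ContainerIdx int
	PortIdx      int
	OfferPort    uint64
}

// Mapper mirrors the shape of hostport.Mapper with simplified parameter types.
type Mapper interface {
	Map(pod string, roles []string, offeredPorts []uint64) ([]Mapping, error)
}

// MapperFunc adapts an ordinary function to the Mapper interface.
type MapperFunc func(pod string, roles []string, offeredPorts []uint64) ([]Mapping, error)

func (f MapperFunc) Map(pod string, roles []string, offeredPorts []uint64) ([]Mapping, error) {
	return f(pod, roles, offeredPorts)
}

func main() {
	// a trivial mapper that just takes the first offered port
	firstPort := MapperFunc(func(pod string, roles []string, offeredPorts []uint64) ([]Mapping, error) {
		if len(offeredPorts) == 0 {
			return nil, fmt.Errorf("no ports offered to %s", pod)
		}
		return []Mapping{{OfferPort: offeredPorts[0]}}, nil
	})

	var m Mapper = firstPort
	fmt.Println(m.Map("default/foo", []string{"*"}, []uint64{31000}))
}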
@@ -61,27 +53,27 @@ type HostPortMapping struct {
 }
 
 type PortAllocationError struct {
-	PodId string
+	PodID string
 	Ports []uint64
 }
 
 func (err *PortAllocationError) Error() string {
-	return fmt.Sprintf("Could not schedule pod %s: %d port(s) could not be allocated", err.PodId, len(err.Ports))
+	return fmt.Sprintf("Could not schedule pod %s: %d port(s) could not be allocated", err.PodID, len(err.Ports))
 }
 
-type DuplicateHostPortError struct {
-	m1, m2 HostPortMapping
+type DuplicateError struct {
+	m1, m2 Mapping
 }
 
-func (err *DuplicateHostPortError) Error() string {
+func (err *DuplicateError) Error() string {
 	return fmt.Sprintf(
 		"Host port %d is specified for container %d, pod %d and container %d, pod %d",
 		err.m1.OfferPort, err.m1.ContainerIdx, err.m1.PortIdx, err.m2.ContainerIdx, err.m2.PortIdx)
 }
 
 // WildcardMapper maps k8s wildcard ports (hostPort == 0) to any available offer port
-func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
-	mapping, err := FixedMapper(t, offer)
+func WildcardMapper(pod *api.Pod, roles []string, offer *mesos.Offer) ([]Mapping, error) {
+	mapping, err := FixedMapper(pod, roles, offer)
 	if err != nil {
 		return nil, err
 	}
@@ -91,11 +83,11 @@ func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 		taken[entry.OfferPort] = struct{}{}
 	}
 
-	wildports := []HostPortMapping{}
-	for i, container := range t.Pod.Spec.Containers {
+	wildports := []Mapping{}
+	for i, container := range pod.Spec.Containers {
 		for pi, port := range container.Ports {
 			if port.HostPort == 0 {
-				wildports = append(wildports, HostPortMapping{
+				wildports = append(wildports, Mapping{
 					ContainerIdx: i,
 					PortIdx:      pi,
 				})
@@ -104,7 +96,7 @@ func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 	}
 
 	remaining := len(wildports)
-	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
+	resources.ForeachPortsRange(offer.GetResources(), roles, func(bp, ep uint64, role string) {
 		log.V(3).Infof("Searching for wildcard port in range {%d:%d}", bp, ep)
 		for i := range wildports {
 			if wildports[i].OfferPort != 0 {
@@ -115,7 +107,7 @@ func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 				continue
 			}
 			wildports[i].OfferPort = port
-			wildports[i].Role = starredRole(role)
+			wildports[i].Role = resources.CanonicalRole(role)
 			mapping = append(mapping, wildports[i])
 			remaining--
 			taken[port] = struct{}{}
@@ -126,7 +118,7 @@ func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 
 	if remaining > 0 {
 		err := &PortAllocationError{
-			PodId: t.Pod.Name,
+			PodID: pod.Namespace + "/" + pod.Name,
 		}
 		// it doesn't make sense to include a port list here because they were all zero (wildcards)
 		return nil, err
@@ -136,10 +128,10 @@ func WildcardMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 }
 
 // FixedMapper maps k8s host ports to offered ports ignoring hostPorts == 0 (remaining pod-private)
-func FixedMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
-	requiredPorts := make(map[uint64]HostPortMapping)
-	mapping := []HostPortMapping{}
-	for i, container := range t.Pod.Spec.Containers {
+func FixedMapper(pod *api.Pod, roles []string, offer *mesos.Offer) ([]Mapping, error) {
+	requiredPorts := make(map[uint64]Mapping)
+	mapping := []Mapping{}
+	for i, container := range pod.Spec.Containers {
 		// strip all port==0 from this array; k8s already knows what to do with zero-
 		// ports (it does not create 'port bindings' on the minion-host); we need to
 		// remove the wildcards from this array since they don't consume host resources
@@ -147,24 +139,24 @@ func FixedMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 			if port.HostPort == 0 {
 				continue // ignore
 			}
-			m := HostPortMapping{
+			m := Mapping{
 				ContainerIdx: i,
 				PortIdx:      pi,
 				OfferPort:    uint64(port.HostPort),
 			}
 			if entry, inuse := requiredPorts[uint64(port.HostPort)]; inuse {
-				return nil, &DuplicateHostPortError{entry, m}
+				return nil, &DuplicateError{entry, m}
 			}
 			requiredPorts[uint64(port.HostPort)] = m
 		}
 	}
 
-	foreachPortsRange(offer.GetResources(), t.Roles(), func(bp, ep uint64, role string) {
+	resources.ForeachPortsRange(offer.GetResources(), roles, func(bp, ep uint64, role string) {
 		for port := range requiredPorts {
 			log.V(3).Infof("evaluating port range {%d:%d} %d", bp, ep, port)
 			if (bp <= port) && (port <= ep) {
 				m := requiredPorts[port]
-				m.Role = starredRole(role)
+				m.Role = resources.CanonicalRole(role)
 				mapping = append(mapping, m)
 				delete(requiredPorts, port)
 			}
@@ -174,7 +166,7 @@ func FixedMapper(t *T, offer *mesos.Offer) ([]HostPortMapping, error) {
 	unsatisfiedPorts := len(requiredPorts)
 	if unsatisfiedPorts > 0 {
 		err := &PortAllocationError{
-			PodId: t.Pod.Name,
+			PodID: pod.Namespace + "/" + pod.Name,
 		}
 		for p := range requiredPorts {
 			err.Ports = append(err.Ports, p)
|
|||||||
return mapping, nil
|
return mapping, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHostPortMapper returns a new mapper based
|
type Strategy string
|
||||||
// based on the port mapping key value
|
|
||||||
func NewHostPortMapper(pod *api.Pod) HostPortMapper {
|
const (
|
||||||
filter := map[string]string{
|
// maps a Container.HostPort to the same exact offered host port, ignores .HostPort = 0
|
||||||
meta.PortMappingKey: HostPortMappingFixed,
|
StrategyFixed = Strategy("fixed")
|
||||||
}
|
// same as MappingFixed, except that .HostPort of 0 are mapped to any port offered
|
||||||
selector := labels.Set(filter).AsSelector()
|
StrategyWildcard = Strategy("wildcard")
|
||||||
if selector.Matches(labels.Set(pod.Labels)) {
|
)
|
||||||
return HostPortMapperFunc(FixedMapper)
|
|
||||||
}
|
var validStrategies = map[Strategy]MapperFunc{
|
||||||
return HostPortMapperFunc(WildcardMapper)
|
StrategyFixed: MapperFunc(FixedMapper),
|
||||||
|
StrategyWildcard: MapperFunc(WildcardMapper),
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapper returns a new mapper based on the port mapping key value
|
||||||
|
func (defaultStrategy Strategy) NewMapper(pod *api.Pod) Mapper {
|
||||||
|
strategy, ok := pod.Labels[meta.PortMappingKey]
|
||||||
|
if ok {
|
||||||
|
f, ok := validStrategies[Strategy(strategy)]
|
||||||
|
if ok {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
log.Warningf("invalid port mapping strategy %q, reverting to default %q", strategy, defaultStrategy)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, ok := validStrategies[defaultStrategy]
|
||||||
|
if ok {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
panic("scheduler is misconfigured, unrecognized default strategy \"" + defaultStrategy + "\"")
|
||||||
}
|
}
|
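A self-contained sketch (hypothetical names; the real label key lives in the scheduler's meta package) of the fallback behaviour that Strategy.NewMapper implements above: a per-pod label may pick a strategy, and anything unrecognized falls back to the scheduler-wide default:

package main

import "fmt"

type Strategy string

const (
	StrategyFixed    = Strategy("fixed")
	StrategyWildcard = Strategy("wildcard")
)

// valid maps each known strategy to the mapper it selects (names only, for brevity).
var valid = map[Strategy]string{
	StrategyFixed:    "FixedMapper",
	StrategyWildcard: "WildcardMapper",
}

// pick mirrors NewMapper: an explicit pod label wins if it is recognized,
// otherwise the default strategy decides.
func (def Strategy) pick(podLabels map[string]string) string {
	if s, ok := podLabels["portMapping"]; ok { // stand-in for meta.PortMappingKey
		if m, ok := valid[Strategy(s)]; ok {
			return m
		}
		fmt.Printf("invalid strategy %q, reverting to default %q\n", s, def)
	}
	return valid[def]
}

func main() {
	fmt.Println(StrategyWildcard.pick(nil))                                       // WildcardMapper
	fmt.Println(StrategyWildcard.pick(map[string]string{"portMapping": "fixed"})) // FixedMapper
}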
@@ -14,27 +14,31 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package podtask
+package hostport
 
 import (
 	"testing"
 
 	mesos "github.com/mesos/mesos-go/mesosproto"
 	"github.com/mesos/mesos-go/mesosutil"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 )
 
 func TestDefaultHostPortMatching(t *testing.T) {
-	t.Parallel()
-	task := fakePodTask("foo", nil, nil)
-	pod := &task.Pod
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: "default",
+		},
+	}
 
 	offer := &mesos.Offer{
 		Resources: []*mesos.Resource{
-			newPortsResource("*", 1, 1),
+			resources.NewPorts("*", 1, 1),
 		},
 	}
-	mapping, err := FixedMapper(task, offer)
+	mapping, err := FixedMapper(pod, []string{"*"}, offer)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -52,12 +56,8 @@ func TestDefaultHostPortMatching(t *testing.T) {
 		}},
 		}},
 	}
-	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = FixedMapper(task, offer)
-	if err, _ := err.(*DuplicateHostPortError); err == nil {
+	_, err = FixedMapper(pod, []string{"*"}, offer)
+	if err, _ := err.(*DuplicateError); err == nil {
 		t.Fatal("Expected duplicate port error")
 	} else if err.m1.OfferPort != 123 {
 		t.Fatal("Expected duplicate host port 123")
@@ -65,12 +65,15 @@ func TestDefaultHostPortMatching(t *testing.T) {
 }
 
 func TestWildcardHostPortMatching(t *testing.T) {
-	t.Parallel()
-	task := fakePodTask("foo", nil, nil)
-	pod := &task.Pod
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: "default",
+		},
+	}
 
 	offer := &mesos.Offer{}
-	mapping, err := WildcardMapper(task, offer)
+	mapping, err := WildcardMapper(pod, []string{"*"}, offer)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -81,10 +84,10 @@ func TestWildcardHostPortMatching(t *testing.T) {
 	//--
 	offer = &mesos.Offer{
 		Resources: []*mesos.Resource{
-			newPortsResource("*", 1, 1),
+			resources.NewPorts("*", 1, 1),
 		},
 	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -100,11 +103,7 @@ func TestWildcardHostPortMatching(t *testing.T) {
 		}},
 		}},
 	}
-	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err == nil {
 		t.Fatalf("expected error instead of mappings: %#v", mapping)
 	} else if err, _ := err.(*PortAllocationError); err == nil {
@@ -123,11 +122,7 @@ func TestWildcardHostPortMatching(t *testing.T) {
 		}},
 		}},
 	}
-	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err, _ := err.(*PortAllocationError); err == nil {
 		t.Fatal("Expected port allocation error")
 	} else if !(len(err.Ports) == 1 && err.Ports[0] == 123) {
@@ -144,11 +139,7 @@ func TestWildcardHostPortMatching(t *testing.T) {
 		}},
 		}},
 	}
-	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err, _ := err.(*PortAllocationError); err == nil {
 		t.Fatal("Expected port allocation error")
 	} else if len(err.Ports) != 0 {
@@ -158,10 +149,10 @@ func TestWildcardHostPortMatching(t *testing.T) {
 	//--
 	offer = &mesos.Offer{
 		Resources: []*mesos.Resource{
-			newPortsResource("*", 1, 2),
+			resources.NewPorts("*", 1, 2),
 		},
 	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err != nil {
 		t.Fatal(err)
 	} else if len(mapping) != 2 {
@@ -190,16 +181,12 @@ func TestWildcardHostPortMatching(t *testing.T) {
 		}},
 		}},
 	}
-	task, err = New(api.NewDefaultContext(), "", pod, &mesos.ExecutorInfo{}, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
 	offer = &mesos.Offer{
 		Resources: []*mesos.Resource{
 			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{mesosutil.NewValueRange(1, 1), mesosutil.NewValueRange(3, 5)}),
 		},
 	}
-	mapping, err = WildcardMapper(task, offer)
+	mapping, err = WildcardMapper(pod, []string{"*"}, offer)
 	if err != nil {
 		t.Fatal(err)
 	} else if len(mapping) != 2 {
@@ -27,7 +27,9 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/offers"
 	mesosmeta "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
 	"k8s.io/kubernetes/pkg/api"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 
 	log "github.com/golang/glog"
 	mesos "github.com/mesos/mesos-go/mesosproto"
@@ -53,9 +55,22 @@ const (
 
 var starRole = []string{"*"}
 
+// Config represents elements that are used or required in order to
+// create a pod task that may be scheduled.
+type Config struct {
+	ID                           string              // ID is an optional, unique task ID; auto-generated if not specified
+	DefaultPodRoles              []string            // DefaultPodRoles lists preferred resource groups, prioritized in order
+	FrameworkRoles               []string            // FrameworkRoles identify resource groups from which the framework may consume
+	Prototype                    *mesos.ExecutorInfo // Prototype is required
+	HostPortStrategy             hostport.Strategy   // HostPortStrategy is used as the port mapping strategy, unless overridden by the pod
+	GenerateTaskDiscoveryEnabled bool
+	mapper                       hostport.Mapper // host-port mapping func, derived from pod and default strategy
+	podKey                       string          // k8s key for this pod; managed internally
+}
+
 // A struct that describes a pod task.
 type T struct {
-	ID  string
+	Config
 	Pod api.Pod
 
 	// Stores the final procurement result, once set read-only.
@@ -68,26 +83,16 @@ type T struct {
 	CreateTime  time.Time
 	UpdatedTime time.Time // time of the most recent StatusUpdate we've seen from the mesos master
 
 	podStatus api.PodStatus
-	prototype       *mesos.ExecutorInfo // readonly
-	frameworkRoles  []string            // Mesos framework roles, pods are allowed to be launched with those
-	defaultPodRoles []string            // roles under which pods are scheduled if none are specified in labels
-	podKey          string
-	launchTime      time.Time
-	bindTime        time.Time
-	mapper          HostPortMapper
-}
-
-type Port struct {
-	Port uint64
-	Role string
+	launchTime time.Time
+	bindTime   time.Time
 }
 
 type Spec struct {
 	SlaveID       string
 	AssignedSlave string
 	Resources     []*mesos.Resource
-	PortMap       []HostPortMapping
+	PortMap       []hostport.Mapping
 	Data          []byte
 	Executor      *mesos.ExecutorInfo
 }
@@ -130,9 +135,6 @@ func generateTaskName(pod *api.Pod) string {
 	return fmt.Sprintf("%s.%s.pod", pod.Name, ns)
 }
 
-// GenerateTaskDiscoveryEnabled turns on/off the generation of DiscoveryInfo for TaskInfo records
-var GenerateTaskDiscoveryEnabled = false
-
 func generateTaskDiscovery(pod *api.Pod) *mesos.DiscoveryInfo {
 	di := &mesos.DiscoveryInfo{
 		Visibility: mesos.DiscoveryInfo_CLUSTER.Enum(),
@@ -195,7 +197,7 @@ func (t *T) BuildTaskInfo() (*mesos.TaskInfo, error) {
 		SlaveId: mutil.NewSlaveID(t.Spec.SlaveID),
 	}
 
-	if GenerateTaskDiscoveryEnabled {
+	if t.GenerateTaskDiscoveryEnabled {
 		info.Discovery = generateTaskDiscovery(&t.Pod)
 	}
 
@@ -237,46 +239,45 @@ func (t *T) Roles() (result []string) {
 
 		return filterRoles(
 			roles,
-			not(emptyRole), not(seenRole()), inRoles(t.frameworkRoles...),
+			not(emptyRole), not(seenRole()), inRoles(t.FrameworkRoles...),
 		)
 	}
 
 	// no roles label defined, return defaults
-	return t.defaultPodRoles
+	return t.DefaultPodRoles
 }
 
-func New(ctx api.Context, id string, pod *api.Pod, prototype *mesos.ExecutorInfo, frameworkRoles, defaultPodRoles []string) (*T, error) {
-	if prototype == nil {
-		return nil, fmt.Errorf("illegal argument: executor is nil")
+func New(ctx api.Context, config Config, pod *api.Pod) (*T, error) {
+	if config.Prototype == nil {
+		return nil, fmt.Errorf("illegal argument: executor-info prototype is nil")
 	}
 
-	if len(frameworkRoles) == 0 {
-		frameworkRoles = starRole
+	if len(config.FrameworkRoles) == 0 {
+		config.FrameworkRoles = starRole
 	}
 
-	if len(defaultPodRoles) == 0 {
-		defaultPodRoles = starRole
+	if len(config.DefaultPodRoles) == 0 {
+		config.DefaultPodRoles = starRole
 	}
 
 	key, err := MakePodKey(ctx, pod.Name)
 	if err != nil {
 		return nil, err
 	}
+	config.podKey = key
 
-	if id == "" {
-		id = "pod." + uuid.NewUUID().String()
+	if config.ID == "" {
+		config.ID = "pod." + uuid.NewUUID().String()
 	}
 
+	// the scheduler better get the fallback strategy right, otherwise we panic here
+	config.mapper = config.HostPortStrategy.NewMapper(pod)
+
 	task := &T{
-		ID:              id,
-		Pod:             *pod,
-		State:           StatePending,
-		podKey:          key,
-		mapper:          NewHostPortMapper(pod),
-		Flags:           make(map[FlagType]struct{}),
-		prototype:       prototype,
-		frameworkRoles:  frameworkRoles,
-		defaultPodRoles: defaultPodRoles,
+		Pod:    *pod,
+		Config: config,
+		State:  StatePending,
+		Flags:  make(map[FlagType]struct{}),
 	}
 	task.CreateTime = time.Now()
 
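A minimal usage sketch mirroring the test code elsewhere in this PR: callers now pass a single podtask.Config value instead of the old positional arguments. The wrapper function name and package are hypothetical; the import paths are the in-tree contrib/mesos packages touched by this diff:

package example

import (
	mesos "github.com/mesos/mesos-go/mesosproto"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
	"k8s.io/kubernetes/pkg/api"
)

// newTask builds a pod task the post-refactor way: one Config value carries the
// executor prototype, the roles and the default host-port strategy.
func newTask(pod *api.Pod, ei *mesos.ExecutorInfo) (*podtask.T, error) {
	return podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			Prototype:        ei,                        // required; New returns an error if nil
			FrameworkRoles:   []string{"*"},             // defaults to "*" when left empty
			DefaultPodRoles:  []string{"*"},             // defaults to "*" when left empty
			HostPortStrategy: hostport.StrategyWildcard, // fallback port-mapping strategy
		},
		pod,
	)
}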
@@ -305,11 +306,17 @@ func (t *T) SaveRecoveryInfo(dict map[string]string) {
 func RecoverFrom(pod api.Pod) (*T, bool, error) {
 	// we only expect annotations if pod has been bound, which implies that it has already
 	// been scheduled and launched
-	if pod.Spec.NodeName == "" && len(pod.Annotations) == 0 {
+	if len(pod.Annotations) == 0 {
 		log.V(1).Infof("skipping recovery for unbound pod %v/%v", pod.Namespace, pod.Name)
 		return nil, false, nil
 	}
 
+	// we don't track mirror pods, they're considered part of the executor
+	if _, isMirrorPod := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; isMirrorPod {
+		log.V(1).Infof("skipping recovery for mirror pod %v/%v", pod.Namespace, pod.Name)
+		return nil, false, nil
+	}
+
 	// only process pods that are not in a terminal state
 	switch pod.Status.Phase {
 	case api.PodPending, api.PodRunning, api.PodUnknown: // continue
@@ -328,12 +335,13 @@ func RecoverFrom(pod api.Pod) (*T, bool, error) {
 
 	now := time.Now()
 	t := &T{
+		Config: Config{
+			podKey: key,
+		},
 		Pod:        pod,
 		CreateTime: now,
-		podKey:     key,
 		State:      StatePending, // possibly running? mesos will tell us during reconciliation
 		Flags:      make(map[FlagType]struct{}),
-		mapper:     NewHostPortMapper(&pod),
 		launchTime: now,
 		bindTime:   now,
 		Spec:       &Spec{},
@@ -26,6 +26,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"k8s.io/kubernetes/contrib/mesos/pkg/node"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 )
 
@@ -37,16 +39,18 @@ const (
 func fakePodTask(id string, allowedRoles, defaultRoles []string) *T {
 	t, _ := New(
 		api.NewDefaultContext(),
-		"",
+		Config{
+			Prototype:        &mesos.ExecutorInfo{},
+			FrameworkRoles:   allowedRoles,
+			DefaultPodRoles:  defaultRoles,
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		&api.Pod{
 			ObjectMeta: api.ObjectMeta{
 				Name:      id,
 				Namespace: api.NamespaceDefault,
 			},
 		},
-		&mesos.ExecutorInfo{},
-		allowedRoles,
-		defaultRoles,
 	)
 
 	return t
@@ -219,7 +223,7 @@ func TestAcceptOfferPorts(t *testing.T) {
 		Resources: []*mesos.Resource{
 			mutil.NewScalarResource("cpus", t_min_cpu),
 			mutil.NewScalarResource("mem", t_min_mem),
-			newPortsResource("*", 1, 1),
+			resources.NewPorts("*", 1, 1),
 		},
 	}
 
@@ -415,12 +419,3 @@ func newScalarAttribute(name string, val float64) *mesos.Attribute {
 		Scalar: &mesos.Value_Scalar{Value: proto.Float64(val)},
 	}
 }
-
-func newPortsResource(role string, ports ...uint64) *mesos.Resource {
-	return &mesos.Resource{
-		Name:   proto.String("ports"),
-		Type:   mesos.Value_RANGES.Enum(),
-		Ranges: newRanges(ports),
-		Role:   stringPtrTo(role),
-	}
-}
|
@ -25,7 +25,7 @@ import (
|
|||||||
mesos "github.com/mesos/mesos-go/mesosproto"
|
mesos "github.com/mesos/mesos-go/mesosproto"
|
||||||
"github.com/mesos/mesos-go/mesosutil"
|
"github.com/mesos/mesos-go/mesosutil"
|
||||||
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/executorinfo"
|
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/executorinfo"
|
||||||
mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
|
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
)
|
)
|
||||||
@ -164,8 +164,8 @@ func NewPodResourcesProcurement() Procurement {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
wantedCpus := float64(mresource.NewCPUShares(limits[api.ResourceCPU]))
|
wantedCpus := float64(resources.NewCPUShares(limits[api.ResourceCPU]))
|
||||||
wantedMem := float64(mresource.NewMegaBytes(limits[api.ResourceMemory]))
|
wantedMem := float64(resources.NewMegaBytes(limits[api.ResourceMemory]))
|
||||||
|
|
||||||
log.V(4).Infof(
|
log.V(4).Infof(
|
||||||
"trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB",
|
"trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB",
|
||||||
@ -199,18 +199,18 @@ func NewPodResourcesProcurement() Procurement {
|
|||||||
func NewPortsProcurement() Procurement {
|
func NewPortsProcurement() Procurement {
|
||||||
return ProcurementFunc(func(t *T, _ *api.Node, ps *ProcureState) error {
|
return ProcurementFunc(func(t *T, _ *api.Node, ps *ProcureState) error {
|
||||||
// fill in port mapping
|
// fill in port mapping
|
||||||
if mapping, err := t.mapper.Map(t, ps.offer); err != nil {
|
if mapping, err := t.mapper.Map(&t.Pod, t.Roles(), ps.offer); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
ports := []Port{}
|
ports := []resources.Port{}
|
||||||
for _, entry := range mapping {
|
for _, entry := range mapping {
|
||||||
ports = append(ports, Port{
|
ports = append(ports, resources.Port{
|
||||||
Port: entry.OfferPort,
|
Port: entry.OfferPort,
|
||||||
Role: entry.Role,
|
Role: entry.Role,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
ps.spec.PortMap = mapping
|
ps.spec.PortMap = mapping
|
||||||
ps.spec.Resources = append(ps.spec.Resources, portRangeResources(ports)...)
|
ps.spec.Resources = append(ps.spec.Resources, resources.PortRanges(ports)...)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -220,20 +220,20 @@ func NewPortsProcurement() Procurement {
 // If a given offer has no executor IDs set, the given prototype executor resources are considered for procurement.
 // If a given offer has one executor ID set, only pod resources are being procured.
 // An offer with more than one executor ID implies an invariant violation and the first executor ID is being considered.
-func NewExecutorResourceProcurer(resources []*mesos.Resource, registry executorinfo.Registry) Procurement {
+func NewExecutorResourceProcurer(rs []*mesos.Resource, registry executorinfo.Registry) Procurement {
 	return ProcurementFunc(func(t *T, _ *api.Node, ps *ProcureState) error {
 		eids := len(ps.offer.GetExecutorIds())
 		switch {
 		case eids == 0:
-			wantedCpus := sumResources(filterResources(resources, isScalar, hasName("cpus")))
-			wantedMem := sumResources(filterResources(resources, isScalar, hasName("mem")))
+			wantedCpus := resources.Sum(resources.Filter(rs, resources.IsScalar, resources.HasName("cpus")))
+			wantedMem := resources.Sum(resources.Filter(rs, resources.IsScalar, resources.HasName("mem")))
 
-			procuredCpu, remaining := procureScalarResources("cpus", wantedCpus, t.frameworkRoles, ps.offer.GetResources())
+			procuredCpu, remaining := procureScalarResources("cpus", wantedCpus, t.FrameworkRoles, ps.offer.GetResources())
 			if procuredCpu == nil {
 				return fmt.Errorf("not enough cpu resources for executor: want=%v", wantedCpus)
 			}
 
-			procuredMem, remaining := procureScalarResources("mem", wantedMem, t.frameworkRoles, remaining)
+			procuredMem, remaining := procureScalarResources("mem", wantedMem, t.FrameworkRoles, remaining)
 			if procuredMem == nil {
 				return fmt.Errorf("not enough mem resources for executor: want=%v", wantedMem)
 			}
@@ -273,12 +273,12 @@ func procureScalarResources(
 	roles []string,
 	offered []*mesos.Resource,
 ) (procured, remaining []*mesos.Resource) {
-	sorted := byRoles(roles...).sort(offered)
+	sorted := resources.ByRoles(roles...).Sort(offered)
 	procured = make([]*mesos.Resource, 0, len(sorted))
 	remaining = make([]*mesos.Resource, 0, len(sorted))
 
 	for _, r := range sorted {
-		if want >= epsilon && resourceMatchesAll(r, hasName(name), isScalar) {
+		if want >= epsilon && resources.MatchesAll(r, resources.HasName(name), resources.IsScalar) {
 			left, role := r.GetScalar().GetValue(), r.Role
 			consumed := math.Min(want, left)
 
@@ -23,6 +23,8 @@ import (
 	"github.com/mesos/mesos-go/mesosutil"
 
 	mesos "github.com/mesos/mesos-go/mesosproto"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"reflect"
@@ -51,7 +53,12 @@ func TestNewPodResourcesProcurement(t *testing.T) {
 
 	task, _ := New(
 		api.NewDefaultContext(),
-		"",
+		Config{
+			Prototype:        executor,
+			FrameworkRoles:   []string{"*"},
+			DefaultPodRoles:  []string{"*"},
+			HostPortStrategy: hostport.StrategyWildcard,
+		},
 		&api.Pod{
 			ObjectMeta: api.ObjectMeta{
 				Name: "test",
@@ -76,9 +83,6 @@ func TestNewPodResourcesProcurement(t *testing.T) {
 				},
 			},
 		},
-		executor,
-		[]string{"*"},
-		[]string{"*"},
 	)
 
 	procurement := NewPodResourcesProcurement()
@@ -214,6 +218,6 @@ func TestProcureRoleResources(t *testing.T) {
 
 func scalar(name string, value float64, role string) *mesos.Resource {
 	res := mesosutil.NewScalarResource(name, value)
-	res.Role = stringPtrTo(role)
+	res.Role = resources.StringPtrTo(role)
 	return res
 }
|
@ -81,24 +81,3 @@ func inRoles(roles ...string) rolePredicate {
|
|||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// starredRole returns a "*" if the given role is empty else the role itself
|
|
||||||
func starredRole(name string) string {
|
|
||||||
if name == "" {
|
|
||||||
return "*"
|
|
||||||
}
|
|
||||||
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringPtrTo returns a pointer to the given string
|
|
||||||
// or nil if it is empty string.
|
|
||||||
func stringPtrTo(s string) *string {
|
|
||||||
var protos *string
|
|
||||||
|
|
||||||
if s != "" {
|
|
||||||
protos = &s
|
|
||||||
}
|
|
||||||
|
|
||||||
return protos
|
|
||||||
}
|
|
||||||
|
@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Package resource contains the Mesos scheduler specific resource functions
|
// Package resources contains the Mesos scheduler specific resource functions
|
||||||
package resource
|
package resources
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package resource
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api"
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package resource
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"math"
|
@ -14,15 +14,20 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package podtask
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/gogo/protobuf/proto"
|
"github.com/gogo/protobuf/proto"
|
||||||
mesos "github.com/mesos/mesos-go/mesosproto"
|
mesos "github.com/mesos/mesos-go/mesosproto"
|
||||||
)
|
)
|
||||||
|
|
||||||
// portRangeResources creates a range resource for the spec ports.
|
type Port struct {
|
||||||
func portRangeResources(Ports []Port) []*mesos.Resource {
|
Port uint64
|
||||||
|
Role string
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortRanges creates a range resource for the spec ports.
|
||||||
|
func PortRanges(Ports []Port) []*mesos.Resource {
|
||||||
rolePorts := make(map[string][]uint64, len(Ports))
|
rolePorts := make(map[string][]uint64, len(Ports))
|
||||||
|
|
||||||
for _, p := range Ports {
|
for _, p := range Ports {
|
||||||
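With the move into the exported `resources` package, `Port` and `PortRanges` become public API. A small illustrative sketch of building Mesos `ports` resources from host ports; the role names and port numbers below are invented for the example:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
)

func main() {
	// Two host ports reserved for a framework role, one from the default "*" role.
	ports := []resources.Port{
		{Port: 31000, Role: "slave_public"},
		{Port: 31001, Role: "slave_public"},
		{Port: 31002, Role: "*"},
	}

	// PortRanges groups the ports by role and emits one RANGES resource per role.
	for _, res := range resources.PortRanges(ports) {
		fmt.Println(res.GetRole(), res.GetRanges().Range)
	}
}
```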
@ -36,8 +41,8 @@ func portRangeResources(Ports []Port) []*mesos.Resource {
 			&mesos.Resource{
 				Name:   proto.String("ports"),
 				Type:   mesos.Value_RANGES.Enum(),
-				Ranges: newRanges(ports),
-				Role:   stringPtrTo(role),
+				Ranges: NewRanges(ports),
+				Role:   StringPtrTo(role),
 			},
 		)
 	}

@ -45,8 +50,8 @@ func portRangeResources(Ports []Port) []*mesos.Resource {
 	return resources
 }
 
-// newRanges generates port ranges from the given list of ports. (naive implementation)
-func newRanges(ports []uint64) *mesos.Value_Ranges {
+// NewRanges generates port ranges from the given list of ports. (naive implementation)
+func NewRanges(ports []uint64) *mesos.Value_Ranges {
 	r := make([]*mesos.Value_Range, 0, len(ports))
 	for _, port := range ports {
 		x := proto.Uint64(port)

@ -55,11 +60,11 @@ func newRanges(ports []uint64) *mesos.Value_Ranges {
 	return &mesos.Value_Ranges{Range: r}
 }
 
-// foreachPortsRange calls f for each resource that matches the given roles
+// ForeachPortsRange calls f for each resource that matches the given roles
 // in the order of the given roles.
-func foreachPortsRange(rs []*mesos.Resource, roles []string, f func(begin, end uint64, role string)) {
-	rs = filterResources(rs, hasName("ports"))
-	rs = byRoles(roles...).sort(rs)
+func ForeachPortsRange(rs []*mesos.Resource, roles []string, f func(begin, end uint64, role string)) {
+	rs = Filter(rs, HasName("ports"))
+	rs = ByRoles(roles...).Sort(rs)
 
 	for _, resource := range rs {
 		for _, r := range (*resource).GetRanges().Range {

@ -70,22 +75,22 @@ func foreachPortsRange(rs []*mesos.Resource, roles []string, f func(begin, end u
 	}
 }
 
-// byRolesSorter sorts resources according to the ordering of roles.
-type byRolesSorter struct {
+// ByRolesSorter sorts resources according to the ordering of roles.
+type ByRolesSorter struct {
 	roles []string
 }
 
-// byRoles returns a byRolesSorter with the given roles.
-func byRoles(roles ...string) *byRolesSorter {
-	return &byRolesSorter{roles: roles}
+// ByRoles returns a ByRolesSorter with the given roles.
+func ByRoles(roles ...string) *ByRolesSorter {
+	return &ByRolesSorter{roles: roles}
 }
 
-// sort sorts the given resources according to the order of roles in the byRolesSorter
+// sort sorts the given resources according to the order of roles in the ByRolesSorter
 // and returns the sorted resources.
-func (sorter *byRolesSorter) sort(resources []*mesos.Resource) []*mesos.Resource {
+func (sorter *ByRolesSorter) Sort(resources []*mesos.Resource) []*mesos.Resource {
 	rolesMap := map[string][]*mesos.Resource{} // maps roles to resources
 	for _, res := range resources {
-		role := starredRole(res.GetRole())
+		role := CanonicalRole(res.GetRole())
 		rolesMap[role] = append(rolesMap[role], res)
 	}
 
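`ByRoles(...).Sort(...)` orders resources by the caller's role preference, which lets the scheduler consume role-reserved resources before unreserved `"*"` ones. An illustrative sketch; the offer contents are invented for the example:

```go
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
)

func main() {
	offered := []*mesos.Resource{
		resources.NewPorts("*", 31000, 31001),     // unreserved ports
		resources.NewPorts("slave_public", 31002), // role-reserved ports
	}

	// Prefer the role-reserved resources over "*" when both are present.
	for _, r := range resources.ByRoles("slave_public", "*").Sort(offered) {
		fmt.Println(r.GetRole(), r.GetRanges().Range)
	}
}
```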
@ -99,12 +104,21 @@ func (sorter *byRolesSorter) sort(resources []*mesos.Resource) []*mesos.Resource
 	return result
 }
 
-// resourcePredicate is a predicate function on *mesos.Resource structs.
-type resourcePredicate func(*mesos.Resource) bool
+// ResourcePredicate is a predicate function on *mesos.Resource structs.
+type (
+	ResourcePredicate  func(*mesos.Resource) bool
+	ResourcePredicates []ResourcePredicate
+)
 
-// filter filters the given slice of resources and returns a slice of resources
+// Filter filters the given slice of resources and returns a slice of resources
 // matching all given predicates.
-func filterResources(res []*mesos.Resource, ps ...resourcePredicate) []*mesos.Resource {
+func Filter(res []*mesos.Resource, ps ...ResourcePredicate) []*mesos.Resource {
+	return ResourcePredicates(ps).Filter(res)
+}
+
+// Filter filters the given slice of resources and returns a slice of resources
+// matching all given predicates.
+func (ps ResourcePredicates) Filter(res []*mesos.Resource) []*mesos.Resource {
 	filtered := make([]*mesos.Resource, 0, len(res))
 
 next:
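`Filter`, `HasName`, and the predicate types are now exported, and predicates can be applied either as variadic arguments or through the `ResourcePredicates` slice type. A small sketch of both call styles; the offer slice is an invented fixture:

```go
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
)

func main() {
	offered := []*mesos.Resource{
		resources.NewPorts("*", 31000, 31001),
		resources.NewPorts("slave_public", 31002),
	}

	// Variadic style: keep only resources named "ports".
	ports := resources.Filter(offered, resources.HasName("ports"))
	fmt.Println("ports resources:", len(ports))

	// Slice style: the same predicates can be stored and reused.
	preds := resources.ResourcePredicates{resources.HasName("ports")}
	fmt.Println("first resource matches:", preds.MatchesAll(offered[0]))
}
```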
@ -121,8 +135,13 @@ next:
 	return filtered
 }
 
-// resourceMatchesAll returns true if the given resource matches all given predicates ps.
-func resourceMatchesAll(res *mesos.Resource, ps ...resourcePredicate) bool {
+// MatchesAll returns true if the given resource matches all given predicates ps.
+func MatchesAll(res *mesos.Resource, ps ...ResourcePredicate) bool {
+	return ResourcePredicates(ps).MatchesAll(res)
+}
+
+// MatchesAll returns true if the given resource matches all given predicates ps.
+func (ps ResourcePredicates) MatchesAll(res *mesos.Resource) bool {
 	for _, p := range ps {
 		if !p(res) {
 			return false

@ -132,7 +151,7 @@ func resourceMatchesAll(res *mesos.Resource, ps ...resourcePredicate) bool {
 	return true
 }
 
-func sumResources(res []*mesos.Resource) float64 {
+func Sum(res []*mesos.Resource) float64 {
 	var sum float64
 
 	for _, r := range res {

@ -142,15 +161,45 @@ func sumResources(res []*mesos.Resource) float64 {
 	return sum
 }
 
-// isScalar returns true if the given resource is a scalar type.
-func isScalar(r *mesos.Resource) bool {
+// IsScalar returns true if the given resource is a scalar type.
+func IsScalar(r *mesos.Resource) bool {
 	return r.GetType() == mesos.Value_SCALAR
 }
 
-// hasName returns a resourcePredicate which returns true
+// HasName returns a ResourcePredicate which returns true
 // if the given resource has the given name.
-func hasName(name string) resourcePredicate {
+func HasName(name string) ResourcePredicate {
 	return func(r *mesos.Resource) bool {
 		return r.GetName() == name
 	}
 }
+
+// StringPtrTo returns a pointer to the given string
+// or nil if it is empty string.
+func StringPtrTo(s string) *string {
+	var protos *string
+
+	if s != "" {
+		protos = &s
+	}
+
+	return protos
+}
+
+// CanonicalRole returns a "*" if the given role is empty else the role itself
+func CanonicalRole(name string) string {
+	if name == "" {
+		return "*"
+	}
+
+	return name
+}
+
+func NewPorts(role string, ports ...uint64) *mesos.Resource {
+	return &mesos.Resource{
+		Name:   proto.String("ports"),
+		Type:   mesos.Value_RANGES.Enum(),
+		Ranges: NewRanges(ports),
+		Role:   StringPtrTo(role),
+	}
+}
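The remaining helpers — `IsScalar`, `Sum`, `CanonicalRole`, `StringPtrTo`, `NewPorts` — round out the exported surface. An illustrative sketch that totals offered CPUs and normalizes an unset role; the offer contents are made up, and `Sum` is assumed to add the scalar values of the given resources, per its use in this package:

```go
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
)

func main() {
	// Invented offer contents: two scalar "cpus" resources and a ports range.
	offered := []*mesos.Resource{
		mesosutil.NewScalarResource("cpus", 2.0),
		mesosutil.NewScalarResource("cpus", 1.5),
		resources.NewPorts("slave_public", 31000, 31001),
	}

	// Total CPU: narrow to scalar "cpus" resources, then sum their values.
	cpus := resources.Filter(offered, resources.HasName("cpus"), resources.IsScalar)
	fmt.Println("cpus:", resources.Sum(cpus))

	// CanonicalRole normalizes an unset role to "*".
	fmt.Println("role:", resources.CanonicalRole(offered[0].GetRole()))

	// StringPtrTo returns nil for "", which leaves a Role field unset.
	fmt.Println("empty role pointer is nil:", resources.StringPtrTo("") == nil)
}
```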
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package resource
+package resources
 
 import (
 	"fmt"

@ -66,7 +66,8 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/client/cache"

@ -95,8 +96,8 @@ const (
 	defaultReconcileCooldown     = 15 * time.Second
 	defaultNodeRelistPeriod      = 5 * time.Minute
 	defaultFrameworkName         = "Kubernetes"
-	defaultExecutorCPUs          = mresource.CPUShares(0.25) // initial CPU allocated for executor
-	defaultExecutorMem           = mresource.MegaBytes(128.0) // initial memory allocated for executor
+	defaultExecutorCPUs          = resources.CPUShares(0.25)  // initial CPU allocated for executor
+	defaultExecutorMem           = resources.MegaBytes(128.0) // initial memory allocated for executor
 	defaultExecutorInfoCacheSize = 10000
 )
 

@ -119,8 +120,8 @@ type SchedulerServer struct {
 	mesosAuthPrincipal    string
 	mesosAuthSecretFile   string
 	mesosCgroupPrefix     string
-	mesosExecutorCPUs     mresource.CPUShares
-	mesosExecutorMem      mresource.MegaBytes
+	mesosExecutorCPUs     resources.CPUShares
+	mesosExecutorMem      resources.MegaBytes
 	checkpoint            bool
 	failoverTimeout       float64
 	generateTaskDiscovery bool

@ -147,8 +148,8 @@ type SchedulerServer struct {
 	hostnameOverride         string
 	reconcileInterval        int64
 	reconcileCooldown        time.Duration
-	defaultContainerCPULimit mresource.CPUShares
-	defaultContainerMemLimit mresource.MegaBytes
+	defaultContainerCPULimit resources.CPUShares
+	defaultContainerMemLimit resources.MegaBytes
 	schedulerConfigFileName  string
 	graceful                 bool
 	frameworkName            string

@ -174,6 +175,7 @@ type SchedulerServer struct {
 	sandboxOverlay                 string
 	conntrackMax                   int
 	conntrackTCPTimeoutEstablished int
+	useHostPortEndpoints           bool
 
 	executable string // path to the binary running this service
 	client     *clientset.Clientset

@ -202,8 +204,8 @@ func NewSchedulerServer() *SchedulerServer {
 		runProxy:                 true,
 		executorSuicideTimeout:   execcfg.DefaultSuicideTimeout,
 		launchGracePeriod:        execcfg.DefaultLaunchGracePeriod,
-		defaultContainerCPULimit: mresource.DefaultDefaultContainerCPULimit,
-		defaultContainerMemLimit: mresource.DefaultDefaultContainerMemLimit,
+		defaultContainerCPULimit: resources.DefaultDefaultContainerCPULimit,
+		defaultContainerMemLimit: resources.DefaultDefaultContainerMemLimit,
 
 		proxyMode: "userspace", // upstream default is "iptables" post-v1.1
 

@ -231,6 +233,7 @@ func NewSchedulerServer() *SchedulerServer {
 		containPodResources:            true,
 		nodeRelistPeriod:               defaultNodeRelistPeriod,
 		conntrackTCPTimeoutEstablished: 0, // non-zero values may require hand-tuning other sysctl's on the host; do so with caution
+		useHostPortEndpoints:           true,
 
 		// non-zero values can trigger failures when updating /sys/module/nf_conntrack/parameters/hashsize
 		// when kube-proxy is running in a non-root netns (init_net); setting this to a non-zero value will

@ -293,6 +296,7 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
 	fs.Var(&s.defaultContainerMemLimit, "default-container-mem-limit", "Containers without a memory resource limit are admitted this much amount of memory in MB")
 	fs.BoolVar(&s.containPodResources, "contain-pod-resources", s.containPodResources, "Reparent pod containers into mesos cgroups; disable if you're having strange mesos/docker/systemd interactions.")
 	fs.DurationVar(&s.nodeRelistPeriod, "node-monitor-period", s.nodeRelistPeriod, "Period between relisting of all nodes from the apiserver.")
+	fs.BoolVar(&s.useHostPortEndpoints, "host-port-endpoints", s.useHostPortEndpoints, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
 
 	fs.IntVar(&s.executorLogV, "executor-logv", s.executorLogV, "Logging verbosity of spawned minion and executor processes.")
 	fs.BoolVar(&s.executorBindall, "executor-bindall", s.executorBindall, "When true will set -address of the executor to 0.0.0.0.")

@ -552,7 +556,6 @@ func (s *SchedulerServer) getDriver() (driver bindings.SchedulerDriver) {
 }
 
 func (s *SchedulerServer) Run(hks hyperkube.Interface, _ []string) error {
-	podtask.GenerateTaskDiscoveryEnabled = s.generateTaskDiscovery
 	if n := len(s.frameworkRoles); n == 0 || n > 2 || (n == 2 && s.frameworkRoles[0] != "*" && s.frameworkRoles[1] != "*") {
 		log.Fatalf(`only one custom role allowed in addition to "*"`)
 	}

@ -794,8 +797,14 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
 	broadcaster.StartLogging(log.Infof)
 	broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{eventsClient.Events("")})
 
-	// create scheduler core with all components arranged around it
 	lw := cache.NewListWatchFromClient(s.client.CoreClient, "pods", api.NamespaceAll, fields.Everything())
+
+	hostPortStrategy := hostport.StrategyFixed
+	if s.useHostPortEndpoints {
+		hostPortStrategy = hostport.StrategyWildcard
+	}
+
+	// create scheduler core with all components arranged around it
 	sched := components.New(
 		sc,
 		framework,

@ -805,9 +814,13 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
 		schedulerProcess.Terminal(),
 		s.mux,
 		lw,
-		eiPrototype,
-		s.frameworkRoles,
-		s.defaultPodRoles,
+		podtask.Config{
+			DefaultPodRoles:              s.defaultPodRoles,
+			FrameworkRoles:               s.frameworkRoles,
+			GenerateTaskDiscoveryEnabled: s.generateTaskDiscovery,
+			HostPortStrategy:             hostPortStrategy,
+			Prototype:                    eiPrototype,
+		},
 		s.defaultContainerCPULimit,
 		s.defaultContainerMemLimit,
 	)
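For orientation, the new pieces in the two bootstrap hunks above fit together as follows. This is only a condensed restatement of the diff, not new behavior; `s`, `eiPrototype`, and the imported `hostport`/`podtask` packages come from the surrounding bootstrap code:

```go
// Condensed restatement of the bootstrap wiring above (sketch only).
hostPortStrategy := hostport.StrategyFixed
if s.useHostPortEndpoints { // --host-port-endpoints, default true
	hostPortStrategy = hostport.StrategyWildcard
}

cfg := podtask.Config{
	DefaultPodRoles:              s.defaultPodRoles,
	FrameworkRoles:               s.frameworkRoles,
	GenerateTaskDiscoveryEnabled: s.generateTaskDiscovery,
	HostPortStrategy:             hostPortStrategy,
	Prototype:                    eiPrototype,
}
// cfg is then handed to components.New in place of the former positional
// eiPrototype / frameworkRoles / defaultPodRoles arguments.
_ = cfg
```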
@ -20,7 +20,7 @@ import (
 	"testing"
 	"time"
 
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 
 	"github.com/stretchr/testify/assert"
 )

@ -113,6 +113,6 @@ func Test_DefaultResourceLimits(t *testing.T) {
 	assert := assert.New(t)
 
 	s := NewSchedulerServer()
-	assert.Equal(s.defaultContainerCPULimit, mresource.DefaultDefaultContainerCPULimit)
-	assert.Equal(s.defaultContainerMemLimit, mresource.DefaultDefaultContainerMemLimit)
+	assert.Equal(s.defaultContainerCPULimit, resources.DefaultDefaultContainerCPULimit)
+	assert.Equal(s.defaultContainerMemLimit, resources.DefaultDefaultContainerMemLimit)
 }

@ -19,23 +19,23 @@ package service
 import (
 	log "github.com/golang/glog"
 	"k8s.io/kubernetes/contrib/mesos/pkg/podutil"
-	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/pkg/api"
 )
 
 // StaticPodValidator discards a pod if we can't calculate resource limits for it.
 func StaticPodValidator(
-	defaultContainerCPULimit resource.CPUShares,
-	defaultContainerMemLimit resource.MegaBytes,
+	defaultContainerCPULimit resources.CPUShares,
+	defaultContainerMemLimit resources.MegaBytes,
 	accumCPU, accumMem *float64,
 ) podutil.FilterFunc {
 	return podutil.FilterFunc(func(pod *api.Pod) (bool, error) {
-		_, cpu, _, err := resource.LimitPodCPU(pod, defaultContainerCPULimit)
+		_, cpu, _, err := resources.LimitPodCPU(pod, defaultContainerCPULimit)
 		if err != nil {
 			return false, err
 		}
 
-		_, mem, _, err := resource.LimitPodMem(pod, defaultContainerMemLimit)
+		_, mem, _, err := resources.LimitPodMem(pod, defaultContainerMemLimit)
 		if err != nil {
 			return false, err
 		}
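`StaticPodValidator` now takes its limit types from the `resources` package. A minimal usage sketch follows; the pod fixture is invented, and the printed totals assume the validator updates `accumCPU`/`accumMem` as its pointer arguments suggest:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/service"
	"k8s.io/kubernetes/pkg/api"
)

func main() {
	var accumCPU, accumMem float64

	// Build the filter with the package default limits; accumCPU/accumMem are
	// updated by the validator as pods pass through it.
	filter := service.StaticPodValidator(
		resources.DefaultDefaultContainerCPULimit,
		resources.DefaultDefaultContainerMemLimit,
		&accumCPU, &accumMem,
	)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "static-web", Namespace: api.NamespaceDefault},
		Spec:       api.PodSpec{Containers: []api.Container{{Name: "web", Image: "nginx"}}},
	}

	ok, err := filter(pod) // podutil.FilterFunc is a plain func type, so it is directly callable
	fmt.Println(ok, err, accumCPU, accumMem)
}
```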
@ -21,7 +21,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"k8s.io/kubernetes/contrib/mesos/pkg/podutil"
-	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
+	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/service"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"

@ -143,7 +143,7 @@ func containers(ct ...api.Container) podOpt {
 	})
 }
 
-func resourceLimits(cpu mresource.CPUShares, mem mresource.MegaBytes) ctOpt {
+func resourceLimits(cpu resources.CPUShares, mem resources.MegaBytes) ctOpt {
 	return ctOpt(func(c *api.Container) {
 		if c.Resources.Limits == nil {
 			c.Resources.Limits = make(api.ResourceList)

@ -6,7 +6,6 @@ file_owner
 file_perm
 fs_type
 gke_context
-host_port_endpoints
 max_in_flight
 max_par
 new_file_0644

@ -138,6 +138,7 @@ horizontal-pod-autoscaler-sync-period
 host-ipc-sources
 host-network-sources
 host-pid-sources
+host-port-endpoints
 hostname-override
 http-check-frequency
 http-port