Merge pull request #19696 from mesosphere/jdef_mesos_pod_discovery

Auto commit by PR queue bot
k8s-merge-robot 2016-01-28 01:15:40 -08:00
commit d3fdc61ff7
5 changed files with 112 additions and 50 deletions


@@ -18,21 +18,24 @@ package meta

 // kubernetes api object annotations
 const (
+	// Namespace is the label and annotation namespace for mesos keys
+	Namespace = "k8s.mesosphere.io"
+
 	// the BindingHostKey pod annotation marks a pod as being assigned to a Mesos
 	// slave. It is already or will be launched on the slave as a task.
-	BindingHostKey = "k8s.mesosphere.io/bindingHost"
-	TaskIdKey = "k8s.mesosphere.io/taskId"
-	SlaveIdKey = "k8s.mesosphere.io/slaveId"
-	OfferIdKey = "k8s.mesosphere.io/offerId"
-	ExecutorIdKey = "k8s.mesosphere.io/executorId"
-	ExecutorResourcesKey = "k8s.mesosphere.io/executorResources"
-	PortMappingKey = "k8s.mesosphere.io/portMapping"
-	PortMappingKeyPrefix = "k8s.mesosphere.io/port_"
+	BindingHostKey = Namespace + "/bindingHost"
+	TaskIdKey = Namespace + "/taskId"
+	SlaveIdKey = Namespace + "/slaveId"
+	OfferIdKey = Namespace + "/offerId"
+	ExecutorIdKey = Namespace + "/executorId"
+	ExecutorResourcesKey = Namespace + "/executorResources"
+	PortMappingKey = Namespace + "/portMapping"
+	PortMappingKeyPrefix = Namespace + "/port_"
 	PortMappingKeyFormat = PortMappingKeyPrefix + "%s_%d"
-	PortNameMappingKeyPrefix = "k8s.mesosphere.io/portName_"
+	PortNameMappingKeyPrefix = Namespace + "/portName_"
 	PortNameMappingKeyFormat = PortNameMappingKeyPrefix + "%s_%s"
-	ContainerPortKeyFormat = "k8s.mesosphere.io/containerPort_%s_%s_%d"
-	StaticPodFilenameKey = "k8s.mesosphere.io/staticPodFilename"
-	RolesKey = "k8s.mesosphere.io/roles"
+	ContainerPortKeyFormat = Namespace + "/containerPort_%s_%s_%d"
+	StaticPodFilenameKey = Namespace + "/staticPodFilename"
+	RolesKey = Namespace + "/roles"
 )
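Note: these constants are consumed throughout the scheduler when reading and writing pod annotations. A minimal, self-contained sketch (not part of this commit; the constants are copied here for standalone compilation and the annotation values are hypothetical) of how the namespaced keys compose:

	package main

	import "fmt"

	// Mirrors a few of the constants above so the sketch compiles on its own.
	const (
		Namespace            = "k8s.mesosphere.io"
		TaskIdKey            = Namespace + "/taskId"
		PortMappingKeyPrefix = Namespace + "/port_"
		PortMappingKeyFormat = PortMappingKeyPrefix + "%s_%d"
	)

	func main() {
		// Hypothetical pod annotations, keyed by the namespaced constants.
		annotations := map[string]string{
			TaskIdKey: "pod.abc123",
			fmt.Sprintf(PortMappingKeyFormat, "TCP", 80): "31000", // -> k8s.mesosphere.io/port_TCP_80
		}
		for k, v := range annotations {
			fmt.Printf("%s=%s\n", k, v)
		}
	}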

@@ -25,7 +25,7 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/pborman/uuid"
 	"k8s.io/kubernetes/contrib/mesos/pkg/offers"
-	annotation "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
+	mesosmeta "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta"
 	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/api"
@@ -127,7 +127,58 @@ func generateTaskName(pod *api.Pod) string {
 	if ns == "" {
 		ns = api.NamespaceDefault
 	}
-	return fmt.Sprintf("%s.%s.pods", pod.Name, ns)
+	return fmt.Sprintf("%s.%s.pod", pod.Name, ns)
+}
+
+// GenerateTaskDiscoveryEnabled turns on/off the generation of DiscoveryInfo for TaskInfo records
+var GenerateTaskDiscoveryEnabled = false
+
+func generateTaskDiscovery(pod *api.Pod) *mesos.DiscoveryInfo {
+	di := &mesos.DiscoveryInfo{
+		Visibility: mesos.DiscoveryInfo_CLUSTER.Enum(),
+	}
+
+	switch visibility := pod.Annotations[mesosmeta.Namespace+"/discovery-visibility"]; visibility {
+	case "framework":
+		di.Visibility = mesos.DiscoveryInfo_FRAMEWORK.Enum()
+	case "external":
+		di.Visibility = mesos.DiscoveryInfo_EXTERNAL.Enum()
+	case "", "cluster":
+		// noop, pick the default we already set
+	default:
+		// default to CLUSTER, just warn the user
+		log.Warningf("unsupported discovery-visibility annotation: %q", visibility)
+	}
+
+	// name should be {{label|annotation}:name}.{pod:namespace}.pod
+	nameDecorator := func(n string) *string {
+		ns := pod.Namespace
+		if ns == "" {
+			ns = api.NamespaceDefault
+		}
+		x := n + "." + ns + "." + "pod"
+		return &x
+	}
+	for _, tt := range []struct {
+		fieldName string
+		dest      **string
+		decorator func(string) *string
+	}{
+		{"name", &di.Name, nameDecorator},
+		{"environment", &di.Environment, nil},
+		{"location", &di.Location, nil},
+		{"version", &di.Version, nil},
+	} {
+		d := tt.decorator
+		if d == nil {
+			d = func(s string) *string { return &s }
+		}
+		if v, ok := pod.Labels[tt.fieldName]; ok && v != "" {
+			*tt.dest = d(v)
+		}
+		if v, ok := pod.Annotations[mesosmeta.Namespace+"/discovery-"+tt.fieldName]; ok && v != "" {
+			*tt.dest = d(v)
+		}
+	}
+	return di
 }

 func (t *T) BuildTaskInfo() (*mesos.TaskInfo, error) {
@@ -144,6 +195,10 @@ func (t *T) BuildTaskInfo() (*mesos.TaskInfo, error) {
 		SlaveId: mutil.NewSlaveID(t.Spec.SlaveID),
 	}

+	if GenerateTaskDiscoveryEnabled {
+		info.Discovery = generateTaskDiscovery(&t.Pod)
+	}
+
 	return info, nil
 }
@@ -173,7 +228,7 @@ func (t *T) Has(f FlagType) (exists bool) {
 // If the pod has roles annotations defined they are being used
 // else default pod roles are being returned.
 func (t *T) Roles() (result []string) {
-	if r, ok := t.Pod.ObjectMeta.Annotations[annotation.RolesKey]; ok {
+	if r, ok := t.Pod.ObjectMeta.Annotations[mesosmeta.RolesKey]; ok {
 		roles := strings.Split(r, ",")
 		for i, r := range roles {
@@ -229,10 +284,10 @@ func New(ctx api.Context, id string, pod *api.Pod, prototype *mesos.ExecutorInfo
 }

 func (t *T) SaveRecoveryInfo(dict map[string]string) {
-	dict[annotation.TaskIdKey] = t.ID
-	dict[annotation.SlaveIdKey] = t.Spec.SlaveID
-	dict[annotation.OfferIdKey] = t.Offer.Details().Id.GetValue()
-	dict[annotation.ExecutorIdKey] = t.Spec.Executor.ExecutorId.GetValue()
+	dict[mesosmeta.TaskIdKey] = t.ID
+	dict[mesosmeta.SlaveIdKey] = t.Spec.SlaveID
+	dict[mesosmeta.OfferIdKey] = t.Offer.Details().Id.GetValue()
+	dict[mesosmeta.ExecutorIdKey] = t.Spec.Executor.ExecutorId.GetValue()
 }

 // reconstruct a task from metadata stashed in a pod entry. there are limited pod states that
@@ -287,25 +342,25 @@ func RecoverFrom(pod api.Pod) (*T, bool, error) {
 		offerId string
 	)
 	for _, k := range []string{
-		annotation.BindingHostKey,
-		annotation.TaskIdKey,
-		annotation.SlaveIdKey,
-		annotation.OfferIdKey,
+		mesosmeta.BindingHostKey,
+		mesosmeta.TaskIdKey,
+		mesosmeta.SlaveIdKey,
+		mesosmeta.OfferIdKey,
 	} {
 		v, found := pod.Annotations[k]
 		if !found {
 			return nil, false, fmt.Errorf("incomplete metadata: missing value for pod annotation: %v", k)
 		}
 		switch k {
-		case annotation.BindingHostKey:
+		case mesosmeta.BindingHostKey:
 			t.Spec.AssignedSlave = v
-		case annotation.SlaveIdKey:
+		case mesosmeta.SlaveIdKey:
 			t.Spec.SlaveID = v
-		case annotation.OfferIdKey:
+		case mesosmeta.OfferIdKey:
 			offerId = v
-		case annotation.TaskIdKey:
+		case mesosmeta.TaskIdKey:
 			t.ID = v
-		case annotation.ExecutorIdKey:
+		case mesosmeta.ExecutorIdKey:
 			// this is nowhere near sufficient to re-launch a task, but we really just
 			// want this for tracking
 			t.Spec.Executor = &mesos.ExecutorInfo{ExecutorId: mutil.NewExecutorID(v)}
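Usage illustration (a sketch, not part of this diff): with --mesos-generate-task-discovery enabled, a pod can steer the generated DiscoveryInfo through the discovery-* annotations read by generateTaskDiscovery above. The pod name, namespace, and values below are hypothetical; the comment describes the mapping the new code performs, including the ".<namespace>.pod" suffix added by the name decorator.

	package main

	import "fmt"

	func main() {
		// Hypothetical pod metadata using the k8s.mesosphere.io/discovery-* keys.
		annotations := map[string]string{
			"k8s.mesosphere.io/discovery-visibility":  "framework",
			"k8s.mesosphere.io/discovery-name":        "redis",
			"k8s.mesosphere.io/discovery-environment": "prod",
		}

		// For a pod in the "default" namespace, generateTaskDiscovery would emit:
		//   Visibility:  FRAMEWORK
		//   Name:        "redis.default.pod"   (name decorator appends ".<namespace>.pod")
		//   Environment: "prod"
		for k, v := range annotations {
			fmt.Printf("%s: %s\n", k, v)
		}
	}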


@@ -299,14 +299,14 @@ func TestGeneratePodName(t *testing.T) {
 		},
 	}
 	name := generateTaskName(p)
-	expected := "foo.bar.pods"
+	expected := "foo.bar.pod"
 	if name != expected {
 		t.Fatalf("expected %q instead of %q", expected, name)
 	}

 	p.Namespace = ""
 	name = generateTaskName(p)
-	expected = "foo.default.pods"
+	expected = "foo.default.pod"
 	if name != expected {
 		t.Fatalf("expected %q instead of %q", expected, name)
 	}


@@ -116,6 +116,7 @@ type SchedulerServer struct {
 	mesosExecutorMem mresource.MegaBytes
 	checkpoint bool
 	failoverTimeout float64
+	generateTaskDiscovery bool

 	executorLogV int
 	executorBindall bool
@@ -262,6 +263,7 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) {
 	fs.Var(&s.mesosExecutorMem, "mesos-executor-mem", "Initial memory (MB) to allocate for each Mesos executor container.")
 	fs.BoolVar(&s.checkpoint, "checkpoint", s.checkpoint, "Enable/disable checkpointing for the kubernetes-mesos framework.")
 	fs.Float64Var(&s.failoverTimeout, "failover-timeout", s.failoverTimeout, fmt.Sprintf("Framework failover timeout, in sec."))
+	fs.BoolVar(&s.generateTaskDiscovery, "mesos-generate-task-discovery", s.generateTaskDiscovery, "Enable/disable generation of DiscoveryInfo for Mesos tasks.")
 	fs.UintVar(&s.driverPort, "driver-port", s.driverPort, "Port that the Mesos scheduler driver process should listen on.")
 	fs.StringVar(&s.hostnameOverride, "hostname-override", s.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
 	fs.Int64Var(&s.reconcileInterval, "reconcile-interval", s.reconcileInterval, "Interval at which to execute task reconciliation, in sec. Zero disables.")
@@ -553,6 +555,7 @@ func (s *SchedulerServer) getDriver() (driver bindings.SchedulerDriver) {
 }

 func (s *SchedulerServer) Run(hks hyperkube.Interface, _ []string) error {
+	podtask.GenerateTaskDiscoveryEnabled = s.generateTaskDiscovery
 	if n := len(s.frameworkRoles); n == 0 || n > 2 || (n == 2 && s.frameworkRoles[0] != "*" && s.frameworkRoles[1] != "*") {
 		log.Fatalf(`only one custom role allowed in addition to "*"`)
 	}
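The feature is gated by a package-level boolean that Run() copies from the parsed flag before any tasks are built. A minimal, self-contained sketch of that gate pattern (names and the stdlib flag package here are illustrative stand-ins; the scheduler itself uses pflag and SchedulerServer.Run as shown in the hunk above):

	package main

	import (
		"flag"
		"fmt"
	)

	// Package-level gate, analogous to podtask.GenerateTaskDiscoveryEnabled.
	var generateTaskDiscoveryEnabled = false

	func main() {
		// Analogous to the --mesos-generate-task-discovery flag registered in addCoreFlags.
		enable := flag.Bool("mesos-generate-task-discovery", false,
			"Enable/disable generation of DiscoveryInfo for Mesos tasks.")
		flag.Parse()

		// Copy the parsed flag into the gate once at startup, as Run() does.
		generateTaskDiscoveryEnabled = *enable
		fmt.Println("discovery enabled:", generateTaskDiscoveryEnabled)
	}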


@@ -212,6 +212,7 @@ mesos-default-pod-roles
 mesos-executor-cpus
 mesos-executor-mem
 mesos-framework-roles
+mesos-generate-task-discovery
 mesos-launch-grace-period
 mesos-master
 mesos-sandbox-overlay