From bff3cb4df68d699285285c09c1c46be69ef1465a Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Fri, 18 Nov 2016 13:34:25 -0800 Subject: [PATCH] pkg/api, pkg/apis: Copied constants to versioned types.go, copied several util funcs to versioned --- pkg/api/pod/util.go | 30 -- pkg/api/service/annotations.go | 12 - pkg/api/testing/pod_specs.go | 12 + pkg/api/v1/endpoints/util.go | 238 +++++++++ pkg/api/v1/endpoints/util_test.go | 464 +++++++++++++++++ pkg/api/v1/generate.go | 64 +++ pkg/api/v1/helpers.go | 617 ++++++++++++++++++++++- pkg/api/v1/helpers_test.go | 541 ++++++++++++++++++++ pkg/api/v1/pod/util.go | 120 +++++ pkg/api/{ => v1}/pod/util_test.go | 30 +- pkg/api/v1/ref.go | 133 +++++ pkg/api/v1/resource_helpers.go | 229 +++++++++ pkg/api/v1/resource_helpers_test.go | 120 +++++ pkg/api/v1/service/BUILD | 36 ++ pkg/api/v1/service/annotations.go | 111 ++++ pkg/api/v1/service/util.go | 68 +++ pkg/api/v1/service/util_test.go | 131 +++++ pkg/api/v1/types.go | 41 ++ pkg/api/v1/validation/validation.go | 159 ++++++ pkg/apis/rbac/v1alpha1/helpers.go | 148 ++++++ pkg/apis/rbac/v1alpha1/types.go | 18 + pkg/apis/storage/v1beta1/util/helpers.go | 134 +++++ 22 files changed, 3396 insertions(+), 60 deletions(-) create mode 100644 pkg/api/v1/endpoints/util.go create mode 100644 pkg/api/v1/endpoints/util_test.go create mode 100644 pkg/api/v1/generate.go create mode 100644 pkg/api/v1/helpers_test.go create mode 100644 pkg/api/v1/pod/util.go rename pkg/api/{ => v1}/pod/util_test.go (81%) create mode 100644 pkg/api/v1/ref.go create mode 100644 pkg/api/v1/resource_helpers.go create mode 100644 pkg/api/v1/resource_helpers_test.go create mode 100644 pkg/api/v1/service/BUILD create mode 100644 pkg/api/v1/service/annotations.go create mode 100644 pkg/api/v1/service/util.go create mode 100644 pkg/api/v1/service/util_test.go create mode 100644 pkg/api/v1/validation/validation.go create mode 100644 pkg/apis/rbac/v1alpha1/helpers.go create mode 100644 pkg/apis/storage/v1beta1/util/helpers.go diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index dfc12db6088..0675a50e225 100644 --- a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -16,13 +16,6 @@ limitations under the License. package pod -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/intstr" -) - const ( // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Hostname field. // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1' @@ -36,26 +29,3 @@ const ( // .my-web-service..svc." would be resolved by the cluster DNS Server. PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain" ) - -// FindPort locates the container port for the given pod and portName. If the -// targetPort is a number, use that. If the targetPort is a string, look that -// string up in all named ports in all containers in the target pod. If no -// match is found, fail. 
-func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { - portName := svcPort.TargetPort - switch portName.Type { - case intstr.String: - name := portName.StrVal - for _, container := range pod.Spec.Containers { - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - case intstr.Int: - return portName.IntValue(), nil - } - - return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) -} diff --git a/pkg/api/service/annotations.go b/pkg/api/service/annotations.go index 4b45549983b..42fd68b7593 100644 --- a/pkg/api/service/annotations.go +++ b/pkg/api/service/annotations.go @@ -97,15 +97,3 @@ func GetServiceHealthCheckNodePort(service *api.Service) int32 { } return 0 } - -// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check -func GetServiceHealthCheckPathPort(service *api.Service) (string, int32) { - if !NeedsHealthCheck(service) { - return "", 0 - } - port := GetServiceHealthCheckNodePort(service) - if port == 0 { - return "", 0 - } - return "/healthz", port -} diff --git a/pkg/api/testing/pod_specs.go b/pkg/api/testing/pod_specs.go index d54606b300e..87300a3da73 100644 --- a/pkg/api/testing/pod_specs.go +++ b/pkg/api/testing/pod_specs.go @@ -18,6 +18,7 @@ package testing import ( "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // DeepEqualSafePodSpec returns a PodSpec which is ready to be used with api.Semantic.DeepEqual @@ -30,3 +31,14 @@ func DeepEqualSafePodSpec() api.PodSpec { SecurityContext: &api.PodSecurityContext{}, } } + +// V1DeepEqualSafePodSpec returns a PodSpec which is ready to be used with api.Semantic.DeepEqual +func V1DeepEqualSafePodSpec() v1.PodSpec { + grace := int64(30) + return v1.PodSpec{ + RestartPolicy: v1.RestartPolicyAlways, + DNSPolicy: v1.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &v1.PodSecurityContext{}, + } +} diff --git a/pkg/api/v1/endpoints/util.go b/pkg/api/v1/endpoints/util.go new file mode 100644 index 00000000000..c81999f370f --- /dev/null +++ b/pkg/api/v1/endpoints/util.go @@ -0,0 +1,238 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "hash" + "sort" + + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/types" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +const ( + // TODO: to be deleted after v1.3 is released + // Its value is the json representation of map[string(IP)][HostRecord] + // example: '{"10.245.1.6":{"HostName":"my-webserver"}}' + PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map" +) + +// TODO: to be deleted after v1.3 is released +type HostRecord struct { + HostName string +} + +// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full +// representation, and then repacks that into the canonical layout. 
This +// ensures that code which operates on these objects can rely on the common +// form for things like comparison. The result is a newly allocated slice. +func RepackSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset { + // First map each unique port definition to the sets of hosts that + // offer it. + allAddrs := map[addressKey]*v1.EndpointAddress{} + portToAddrReadyMap := map[v1.EndpointPort]addressSet{} + for i := range subsets { + for _, port := range subsets[i].Ports { + for k := range subsets[i].Addresses { + mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap) + } + for k := range subsets[i].NotReadyAddresses { + mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap) + } + } + } + + // Next, map the sets of hosts to the sets of ports they offer. + // Go does not allow maps or slices as keys to maps, so we have + // to synthesize an artificial key and do a sort of 2-part + // associative entity. + type keyString string + keyToAddrReadyMap := map[keyString]addressSet{} + addrReadyMapKeyToPorts := map[keyString][]v1.EndpointPort{} + for port, addrs := range portToAddrReadyMap { + key := keyString(hashAddresses(addrs)) + keyToAddrReadyMap[key] = addrs + addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port) + } + + // Next, build the N-to-M association the API wants. + final := []v1.EndpointSubset{} + for key, ports := range addrReadyMapKeyToPorts { + var readyAddrs, notReadyAddrs []v1.EndpointAddress + for addr, ready := range keyToAddrReadyMap[key] { + if ready { + readyAddrs = append(readyAddrs, *addr) + } else { + notReadyAddrs = append(notReadyAddrs, *addr) + } + } + final = append(final, v1.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports}) + } + + // Finally, sort it. + return SortSubsets(final) +} + +// The sets of hosts must be de-duped, using IP+UID as the key. +type addressKey struct { + ip string + uid types.UID +} + +// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving +// any existing ready state. +func mapAddressByPort(addr *v1.EndpointAddress, port v1.EndpointPort, ready bool, allAddrs map[addressKey]*v1.EndpointAddress, portToAddrReadyMap map[v1.EndpointPort]addressSet) *v1.EndpointAddress { + // use addressKey to distinguish between two endpoints that are identical addresses + // but may have come from different hosts, for attribution. For instance, Mesos + // assigns pods the node IP, but the pods are distinct. + key := addressKey{ip: addr.IP} + if addr.TargetRef != nil { + key.uid = addr.TargetRef.UID + } + + // Accumulate the address. The full EndpointAddress structure is preserved for use when + // we rebuild the subsets so that the final TargetRef has all of the necessary data. + existingAddress := allAddrs[key] + if existingAddress == nil { + // Make a copy so we don't write to the + // input args of this function. + existingAddress = &v1.EndpointAddress{} + *existingAddress = *addr + allAddrs[key] = existingAddress + } + + // Remember that this port maps to this address. + if _, found := portToAddrReadyMap[port]; !found { + portToAddrReadyMap[port] = addressSet{} + } + // if we have not yet recorded this port for this address, or if the previous + // state was ready, write the current ready state. not ready always trumps + // ready. 
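+	// For example, an address first recorded as ready for this port is
+	// downgraded if a later not-ready record for the same address and port is
+	// seen, but once marked not ready a later ready record cannot flip it back.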
+ if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady { + portToAddrReadyMap[port][existingAddress] = ready + } + return existingAddress +} + +type addressSet map[*v1.EndpointAddress]bool + +type addrReady struct { + addr *v1.EndpointAddress + ready bool +} + +func hashAddresses(addrs addressSet) string { + // Flatten the list of addresses into a string so it can be used as a + // map key. Unfortunately, DeepHashObject is implemented in terms of + // spew, and spew does not handle non-primitive map keys well. So + // first we collapse it into a slice, sort the slice, then hash that. + slice := make([]addrReady, 0, len(addrs)) + for k, ready := range addrs { + slice = append(slice, addrReady{k, ready}) + } + sort.Sort(addrsReady(slice)) + hasher := md5.New() + hashutil.DeepHashObject(hasher, slice) + return hex.EncodeToString(hasher.Sum(nil)[0:]) +} + +func lessAddrReady(a, b addrReady) bool { + // ready is not significant to hashing since we can't have duplicate addresses + return LessEndpointAddress(a.addr, b.addr) +} + +type addrsReady []addrReady + +func (sl addrsReady) Len() int { return len(sl) } +func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsReady) Less(i, j int) bool { + return lessAddrReady(sl[i], sl[j]) +} + +func LessEndpointAddress(a, b *v1.EndpointAddress) bool { + ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) + if ipComparison != 0 { + return ipComparison < 0 + } + if b.TargetRef == nil { + return false + } + if a.TargetRef == nil { + return true + } + return a.TargetRef.UID < b.TargetRef.UID +} + +type addrPtrsByIpAndUID []*v1.EndpointAddress + +func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } +func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrPtrsByIpAndUID) Less(i, j int) bool { + return LessEndpointAddress(sl[i], sl[j]) +} + +// SortSubsets sorts an array of EndpointSubset objects in place. For ease of +// use it returns the input slice. 
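+// Addresses are ordered by IP and then target UID (LessEndpointAddress); ports
+// and whole subsets are ordered by an md5 hash of their content, which yields
+// a stable, if arbitrary, order.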
+func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset { + for i := range subsets { + ss := &subsets[i] + sort.Sort(addrsByIpAndUID(ss.Addresses)) + sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) + sort.Sort(portsByHash(ss.Ports)) + } + sort.Sort(subsetsByHash(subsets)) + return subsets +} + +func hashObject(hasher hash.Hash, obj interface{}) []byte { + hashutil.DeepHashObject(hasher, obj) + return hasher.Sum(nil) +} + +type subsetsByHash []v1.EndpointSubset + +func (sl subsetsByHash) Len() int { return len(sl) } +func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl subsetsByHash) Less(i, j int) bool { + hasher := md5.New() + h1 := hashObject(hasher, sl[i]) + h2 := hashObject(hasher, sl[j]) + return bytes.Compare(h1, h2) < 0 +} + +type addrsByIpAndUID []v1.EndpointAddress + +func (sl addrsByIpAndUID) Len() int { return len(sl) } +func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsByIpAndUID) Less(i, j int) bool { + return LessEndpointAddress(&sl[i], &sl[j]) +} + +type portsByHash []v1.EndpointPort + +func (sl portsByHash) Len() int { return len(sl) } +func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl portsByHash) Less(i, j int) bool { + hasher := md5.New() + h1 := hashObject(hasher, sl[i]) + h2 := hashObject(hasher, sl[j]) + return bytes.Compare(h1, h2) < 0 +} diff --git a/pkg/api/v1/endpoints/util_test.go b/pkg/api/v1/endpoints/util_test.go new file mode 100644 index 00000000000..e73a948650c --- /dev/null +++ b/pkg/api/v1/endpoints/util_test.go @@ -0,0 +1,464 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/types" +) + +func podRef(uid string) *v1.ObjectReference { + ref := v1.ObjectReference{UID: types.UID(uid)} + return &ref +} + +func TestPackSubsets(t *testing.T) { + // The downside of table-driven tests is that some things have to live outside the table. 
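+	// Note that the expected subsets are normalized with SortSubsets before the
+	// comparison at the bottom of this test, so table entries do not have to be
+	// listed in the packed order.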
+ fooObjRef := v1.ObjectReference{Name: "foo"} + barObjRef := v1.ObjectReference{Name: "bar"} + + testCases := []struct { + name string + given []v1.EndpointSubset + expect []v1.EndpointSubset + }{ + { + name: "empty everything", + given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{}, Ports: []v1.EndpointPort{}}}, + expect: []v1.EndpointSubset{}, + }, { + name: "empty addresses", + given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{}, Ports: []v1.EndpointPort{{Port: 111}}}}, + expect: []v1.EndpointSubset{}, + }, { + name: "empty ports", + given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{}}}, + expect: []v1.EndpointSubset{}, + }, { + name: "empty ports", + given: []v1.EndpointSubset{{NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{}}}, + expect: []v1.EndpointSubset{}, + }, { + name: "one set, one ip, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, one port (IPv6)", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "beef::1:2:3:4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "beef::1:2:3:4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, one port", + given: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, one UID, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, one UID, one port", + given: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, empty UID, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, empty UID, one port", + given: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two ips, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + 
expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two mixed ips, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two duplicate ips, one port, notReady is covered by ready", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, two ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "one set, dup ips, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup ips, one port (IPv6)", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "beef::1"}, {IP: "beef::1"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "beef::1"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup ips with target-refs, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &fooObjRef}, + {IP: "1.2.3.4", TargetRef: &barObjRef}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup mixed ips with target-refs, one port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &fooObjRef}, + }, + NotReadyAddresses: []v1.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &barObjRef}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + // finding the same address twice is considered an error on input, only the first address+port + // reference is preserved + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, dup ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}, {Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup ip, dup port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + 
Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup mixed ip, dup port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup ip, two ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "two sets, dup ip, dup uids, two ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "two sets, dup mixed ip, dup uids, two ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + }, { + name: "two sets, two ips, dup port", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two set, dup ip, two uids, dup ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "1.2.3.4", TargetRef: podRef("uid-2")}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two set, dup ip, with and without uid, dup ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: 
[]v1.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-2")}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two ips, two dup ip with uid, dup port, wrong order", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "5.6.7.8"}, + {IP: "5.6.7.8", TargetRef: podRef("uid-1")}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two mixed ips, two dup ip with uid, dup port, wrong order, ends up with split addresses", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{ + {IP: "5.6.7.8"}, + }, + NotReadyAddresses: []v1.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "5.6.7.8", TargetRef: podRef("uid-1")}, + }, + Ports: []v1.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two ips, two ports", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }}, + }, { + name: "four sets, three ips, three ports, jumbled", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.6"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 333}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 222}, {Port: 333}}, + }}, + }, { + name: "four sets, three mixed ips, three ports, jumbled", + given: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 222}}, + }, { + Addresses: []v1.EndpointAddress{{IP: "1.2.3.6"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + 
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 333}}, + }}, + expect: []v1.EndpointSubset{{ + Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}, + Ports: []v1.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []v1.EndpointPort{{Port: 222}, {Port: 333}}, + }}, + }, + } + + for _, tc := range testCases { + result := RepackSubsets(tc.given) + if !reflect.DeepEqual(result, SortSubsets(tc.expect)) { + t.Errorf("case %q: expected %s, got %s", tc.name, spew.Sprintf("%#v", SortSubsets(tc.expect)), spew.Sprintf("%#v", result)) + } + } +} diff --git a/pkg/api/v1/generate.go b/pkg/api/v1/generate.go new file mode 100644 index 00000000000..b6d1b347eba --- /dev/null +++ b/pkg/api/v1/generate.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + utilrand "k8s.io/kubernetes/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if +// necessary. It expects that validation for ObjectMeta has already completed (that Base is a +// valid name) and that the NameGenerator generates a name that is also valid. +func GenerateName(u NameGenerator, meta *ObjectMeta) { + if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { + return + } + meta.Name = u.GenerateName(meta.GenerateName) +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/pkg/api/v1/helpers.go b/pkg/api/v1/helpers.go index 5ea0d329ff6..77dae25f87e 100644 --- a/pkg/api/v1/helpers.go +++ b/pkg/api/v1/helpers.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2014 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,18 +16,629 @@ limitations under the License. package v1 -import "k8s.io/kubernetes/pkg/types" +import ( + "crypto/md5" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/selection" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/sets" + + "github.com/davecgh/go-spew/spew" +) + +// Conversion error conveniently packages up errors in conversions. +type ConversionError struct { + In, Out interface{} + Message string +} + +// Return a helpful string about the error +func (c *ConversionError) Error() string { + return spew.Sprintf( + "Conversion error: %s. (in: %v(%+v) out: %v)", + c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out), + ) +} + +// Semantic can do semantic deep equality checks for api objects. +// Example: api.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b unversioned.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(ResourceQuotaScopeTerminating), + string(ResourceQuotaScopeNotTerminating), + string(ResourceQuotaScopeBestEffort), + string(ResourceQuotaScopeNotBestEffort), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool { + switch scope { + case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) +} + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. 
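+// For example, OpaqueIntResourceName("foo") below returns the ResourceName
+// api.ResourceOpaqueIntPrefix + "foo", for which this predicate reports true;
+// the prefix constant itself is defined in pkg/api.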
+func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), api.ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. +func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) +} + +var standardLimitRangeTypes = sets.NewString( + string(LimitTypePod), + string(LimitTypeContainer), + string(LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is Pod or Container +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceRequestsStorage), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourcePersistentVolumeClaims), + string(ResourceConfigMaps), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) +} + +var standardResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceStorage), + string(ResourceRequestsStorage), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) +} + +var integerResources = sets.NewString( + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsOpaqueIntResourceName(ResourceName(str)) +} // NewDeleteOptions returns a DeleteOptions indicating the resource should // be deleted within the specified grace period. Use zero to indicate // immediate deletion. If you would prefer to use the default grace period, -// use &v1.DeleteOptions{} directly. +// use &api.DeleteOptions{} directly. func NewDeleteOptions(grace int64) *DeleteOptions { return &DeleteOptions{GracePeriodSeconds: &grace} } +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + // NewUIDPreconditions returns a Preconditions with UID set. 
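+// For example, passing the string form of an object's UID to
+// NewPreconditionDeleteOptions above (or NewUIDPreconditions below) wraps it in
+// types.UID so the server can refuse the delete if the stored object's UID no
+// longer matches.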
func NewUIDPreconditions(uid string) *Preconditions { u := types.UID(uid) return &Preconditions{UID: &u} } + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. + if service.Spec.Type == ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(FinalizerKubernetes), + FinalizerOrphan, +) + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { + data, err := runtime.Encode(codec, obj) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", md5.Sum(data)), nil +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { + c := &LoadBalancerStatus{} + c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. 
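+// For example, GetAccessModesAsString([]PersistentVolumeAccessMode{ReadWriteMany,
+// ReadWriteOnce, ReadWriteOnce}) returns "RWO,RWX", and
+// GetAccessModesFromString("RWO,RWX") maps it back to
+// []PersistentVolumeAccessMode{ReadWriteOnce, ReadWriteMany}.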
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. +func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) { + if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { + return unversioned.Time{Time: t}, nil + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return unversioned.Time{}, err + } + return unversioned.Time{Time: t}, nil +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. 
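+	// An illustrative value (field names assumed to follow the v1 Taint JSON
+	// tags): [{"key":"dedicated","value":"gpu","effect":"NoSchedule"}]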
+ TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" +) + +// GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in api. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) { + var tolerations []Toleration + if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations +// and converts it to the []Taint type in api. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { + var taints []Taint + if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) + if err != nil { + return []Taint{}, err + } + } + return taints, nil +} + +// TolerationToleratesTaint checks if the toleration tolerates the taint. 
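+// For example, a toleration with Key "dedicated", Operator "Equal", Value "gpu"
+// and an empty Effect tolerates the taint dedicated=gpu:NoSchedule: an empty
+// Effect matches any effect, an empty Operator defaults to Equal, and Operator
+// Exists matches regardless of Value.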
+func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool { + if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { + return false + } + + if toleration.Key != taint.Key { + return false + } + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value { + return true + } + if toleration.Operator == TolerationOpExists { + return true + } + return false +} + +// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. +func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { + tolerated := false + for i := range tolerations { + if TolerationToleratesTaint(&tolerations[i], taint) { + tolerated = true + break + } + } + return tolerated +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { + var avoidPods AvoidPods + if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +type Sysctl struct { + Name string + Value string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. 
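+// Capacity is a ResourceList, i.e. a map of resource names such as cpu and
+// memory to resource.Quantity values.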
+type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList +} diff --git a/pkg/api/v1/helpers_test.go b/pkg/api/v1/helpers_test.go new file mode 100644 index 00000000000..28c3cec035d --- /dev/null +++ b/pkg/api/v1/helpers_test.go @@ -0,0 +1,541 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/labels" +) + +func TestConversionError(t *testing.T) { + var i int + var s string + i = 3 + s = "foo" + c := ConversionError{ + In: &i, Out: &s, + Message: "Can't make x into y, silly", + } + var e error + e = &c // ensure it implements error + msg := e.Error() + t.Logf("Message is %v", msg) + for _, part := range []string{"3", "int", "string", "Can't"} { + if !strings.Contains(msg, part) { + t.Errorf("didn't find %v", part) + } + } +} + +func TestSemantic(t *testing.T) { + table := []struct { + a, b interface{} + shouldEqual bool + }{ + {resource.MustParse("0"), resource.Quantity{}, true}, + {resource.Quantity{}, resource.MustParse("0"), true}, + {resource.Quantity{}, resource.MustParse("1m"), false}, + { + resource.NewQuantity(5, resource.BinarySI), + resource.NewQuantity(5, resource.DecimalSI), + true, + }, + {resource.MustParse("2m"), resource.MustParse("1m"), false}, + } + + for index, item := range table { + if e, a := item.shouldEqual, Semantic.DeepEqual(item.a, item.b); e != a { + t.Errorf("case[%d], expected %v, got %v.", index, e, a) + } + } +} + +func TestIsStandardResource(t *testing.T) { + testCases := []struct { + input string + output bool + }{ + {"cpu", true}, + {"memory", true}, + {"disk", false}, + {"blah", false}, + {"x.y.z", false}, + } + for i, tc := range testCases { + if IsStandardResourceName(tc.input) != tc.output { + t.Errorf("case[%d], expected: %t, got: %t", i, tc.output, !tc.output) + } + } +} + +func TestAddToNodeAddresses(t *testing.T) { + testCases := []struct { + existing []NodeAddress + toAdd []NodeAddress + expected []NodeAddress + }{ + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{}, + expected: []NodeAddress{}, + }, + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + }, + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeExternalIP, Address: "1.1.1.1"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + }, + }, + { + existing: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeInternalIP, Address: "10.1.1.1"}, + }, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: 
NodeInternalIP, Address: "10.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + }, + } + + for i, tc := range testCases { + AddToNodeAddresses(&tc.existing, tc.toAdd...) + if !Semantic.DeepEqual(tc.expected, tc.existing) { + t.Errorf("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing) + } + } +} + +func TestGetAccessModesFromString(t *testing.T) { + modes := GetAccessModesFromString("ROX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + + modes = GetAccessModesFromString("ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } + + modes = GetAccessModesFromString("RWO,ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } +} + +func TestRemoveDuplicateAccessModes(t *testing.T) { + modes := []PersistentVolumeAccessMode{ + ReadWriteOnce, ReadOnlyMany, ReadOnlyMany, ReadOnlyMany, + } + modes = removeDuplicateAccessModes(modes) + if len(modes) != 2 { + t.Errorf("Expected 2 distinct modes in set but found %v", len(modes)) + } +} + +func TestNodeSelectorRequirementsAsSelector(t *testing.T) { + matchExpressions := []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpIn, + Values: []string{"bar", "baz"}, + }} + mustParse := func(s string) labels.Selector { + out, e := labels.Parse(s) + if e != nil { + panic(e) + } + return out + } + tc := []struct { + in []NodeSelectorRequirement + out labels.Selector + expectErr bool + }{ + {in: nil, out: labels.Nothing()}, + {in: []NodeSelectorRequirement{}, out: labels.Nothing()}, + { + in: matchExpressions, + out: mustParse("foo in (baz,bar)"), + }, + { + in: []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpExists, + Values: []string{"bar", "baz"}, + }}, + expectErr: true, + }, + { + in: []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpGt, + Values: []string{"1"}, + }}, + out: mustParse("foo>1"), + }, + { + in: []NodeSelectorRequirement{{ + Key: "bar", + Operator: NodeSelectorOpLt, + Values: []string{"7"}, + }}, + out: mustParse("bar<7"), + }, + } + + for i, tc := range tc { + out, err := NodeSelectorRequirementsAsSelector(tc.in) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + if !reflect.DeepEqual(out, tc.out) { + t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out) + } + } +} + +func TestGetAffinityFromPod(t *testing.T) { + testCases := []struct { + pod *Pod + expectErr bool + }{ + { + pod: &Pod{}, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["value1", "value2"] + }] + }] + }}}`, + }, + }, + }, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + 
"matchExpressions": [{ + "key": "foo", + `, + }, + }, + }, + expectErr: true, + }, + } + + for i, tc := range testCases { + _, err := GetAffinityFromPodAnnotations(tc.pod.Annotations) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + } +} + +func TestTaintToString(t *testing.T) { + testCases := []struct { + taint *Taint + expectedString string + }{ + { + taint: &Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectedString: "foo=bar:NoSchedule", + }, + { + taint: &Taint{ + Key: "foo", + Effect: TaintEffectNoSchedule, + }, + expectedString: "foo:NoSchedule", + }, + } + + for i, tc := range testCases { + if tc.expectedString != tc.taint.ToString() { + t.Errorf("[%v] expected taint %v converted to %s, got %s", i, tc.taint, tc.expectedString, tc.taint.ToString()) + } + } +} + +func TestMatchTaint(t *testing.T) { + testCases := []struct { + description string + taint *Taint + taintToMatch Taint + expectMatch bool + }{ + { + description: "two taints with the same key,value,effect should match", + taint: &Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + taintToMatch: Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectMatch: true, + }, + { + description: "two taints with the same key,effect but different value should match", + taint: &Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + taintToMatch: Taint{ + Key: "foo", + Value: "different-value", + Effect: TaintEffectNoSchedule, + }, + expectMatch: true, + }, + { + description: "two taints with the different key cannot match", + taint: &Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + taintToMatch: Taint{ + Key: "different-key", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + expectMatch: false, + }, + { + description: "two taints with the different effect cannot match", + taint: &Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectNoSchedule, + }, + taintToMatch: Taint{ + Key: "foo", + Value: "bar", + Effect: TaintEffectPreferNoSchedule, + }, + expectMatch: false, + }, + } + + for _, tc := range testCases { + if tc.expectMatch != tc.taint.MatchTaint(tc.taintToMatch) { + t.Errorf("[%s] expect taint %s match taint %s", tc.description, tc.taint.ToString(), tc.taintToMatch.ToString()) + } + } +} + +func TestGetAvoidPodsFromNode(t *testing.T) { + controllerFlag := true + testCases := []struct { + node *Node + expectValue AvoidPods + expectErr bool + }{ + { + node: &Node{}, + expectValue: AvoidPods{}, + expectErr: false, + }, + { + node: &Node{ + ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + PreferAvoidPodsAnnotationKey: ` + { + "preferAvoidPods": [ + { + "podSignature": { + "podController": { + "apiVersion": "v1", + "kind": "ReplicationController", + "name": "foo", + "uid": "abcdef123456", + "controller": true + } + }, + "reason": "some reason", + "message": "some message" + } + ] + }`, + }, + }, + }, + expectValue: AvoidPods{ + PreferAvoidPods: []PreferAvoidPodsEntry{ + { + PodSignature: PodSignature{ + PodController: &OwnerReference{ + APIVersion: "v1", + Kind: "ReplicationController", + Name: "foo", + UID: "abcdef123456", + Controller: &controllerFlag, + }, + }, + Reason: "some reason", + Message: "some message", + }, + }, + }, + expectErr: false, + }, + { + node: &Node{ + // Missing end symbol of "podController" and "podSignature" + 
ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + PreferAvoidPodsAnnotationKey: ` + { + "preferAvoidPods": [ + { + "podSignature": { + "podController": { + "kind": "ReplicationController", + "apiVersion": "v1" + "reason": "some reason", + "message": "some message" + } + ] + }`, + }, + }, + }, + expectValue: AvoidPods{}, + expectErr: true, + }, + } + + for i, tc := range testCases { + v, err := GetAvoidPodsFromNodeAnnotations(tc.node.Annotations) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + if !reflect.DeepEqual(tc.expectValue, v) { + t.Errorf("[%v]expect value %v but got %v with %v", i, tc.expectValue, v, v.PreferAvoidPods[0].PodSignature.PodController.Controller) + } + } +} + +func TestSysctlsFromPodAnnotation(t *testing.T) { + type Test struct { + annotation string + expectValue []Sysctl + expectErr bool + } + for i, test := range []Test{ + { + annotation: "", + expectValue: nil, + }, + { + annotation: "foo.bar", + expectErr: true, + }, + { + annotation: "=123", + expectErr: true, + }, + { + annotation: "foo.bar=", + expectValue: []Sysctl{{Name: "foo.bar", Value: ""}}, + }, + { + annotation: "foo.bar=42", + expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}}, + }, + { + annotation: "foo.bar=42,", + expectErr: true, + }, + { + annotation: "foo.bar=42,abc.def=1", + expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}, {Name: "abc.def", Value: "1"}}, + }, + } { + sysctls, err := SysctlsFromPodAnnotation(test.annotation) + if test.expectErr && err == nil { + t.Errorf("[%v]expected error but got none", i) + } else if !test.expectErr && err != nil { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } else if !reflect.DeepEqual(sysctls, test.expectValue) { + t.Errorf("[%v]expect value %v but got %v", i, test.expectValue, sysctls) + } + } +} diff --git a/pkg/api/v1/pod/util.go b/pkg/api/v1/pod/util.go new file mode 100644 index 00000000000..1b2d7edb9a3 --- /dev/null +++ b/pkg/api/v1/pod/util.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "encoding/json" + "fmt" + + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/util/intstr" +) + +const ( + // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Hostname field. + // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1' + PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname" + + // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Subdomain field. + // The annotation value is a string specifying the subdomain e.g. "my-web-service" + // If specified, on the pod itself, ".my-web-service..svc." would resolve to + // the pod's IP. + // If there is a headless service named "my-web-service" in the same namespace as the pod, then, + // .my-web-service..svc." would be resolved by the cluster DNS Server. 
+ PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. +func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// TODO: remove this function when init containers becomes a stable feature +func SetInitContainersAndStatuses(pod *v1.Pod) error { + var initContainersAnnotation string + initContainersAnnotation = pod.Annotations[v1.PodInitContainersAnnotationKey] + initContainersAnnotation = pod.Annotations[v1.PodInitContainersBetaAnnotationKey] + if len(initContainersAnnotation) > 0 { + var values []v1.Container + if err := json.Unmarshal([]byte(initContainersAnnotation), &values); err != nil { + return err + } + pod.Spec.InitContainers = values + } + + var initContainerStatusesAnnotation string + initContainerStatusesAnnotation = pod.Annotations[v1.PodInitContainerStatusesAnnotationKey] + initContainerStatusesAnnotation = pod.Annotations[v1.PodInitContainerStatusesBetaAnnotationKey] + if len(initContainerStatusesAnnotation) > 0 { + var values []v1.ContainerStatus + if err := json.Unmarshal([]byte(initContainerStatusesAnnotation), &values); err != nil { + return err + } + pod.Status.InitContainerStatuses = values + } + return nil +} + +// TODO: remove this function when init containers becomes a stable feature +func SetInitContainersAnnotations(pod *v1.Pod) error { + if len(pod.Spec.InitContainers) > 0 { + value, err := json.Marshal(pod.Spec.InitContainers) + if err != nil { + return err + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + pod.Annotations[v1.PodInitContainersAnnotationKey] = string(value) + pod.Annotations[v1.PodInitContainersBetaAnnotationKey] = string(value) + } + return nil +} + +// TODO: remove this function when init containers becomes a stable feature +func SetInitContainersStatusesAnnotations(pod *v1.Pod) error { + if len(pod.Status.InitContainerStatuses) > 0 { + value, err := json.Marshal(pod.Status.InitContainerStatuses) + if err != nil { + return err + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + pod.Annotations[v1.PodInitContainerStatusesAnnotationKey] = string(value) + pod.Annotations[v1.PodInitContainerStatusesBetaAnnotationKey] = string(value) + } + return nil +} diff --git a/pkg/api/pod/util_test.go b/pkg/api/v1/pod/util_test.go similarity index 81% rename from pkg/api/pod/util_test.go rename to pkg/api/v1/pod/util_test.go index 5dd8ff88098..842398d6fa1 100644 --- a/pkg/api/pod/util_test.go +++ b/pkg/api/v1/pod/util_test.go @@ -19,26 +19,26 @@ package pod import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/intstr" ) func TestFindPort(t *testing.T) { testCases := []struct { name string - containers []api.Container + containers []v1.Container port intstr.IntOrString expected int pass bool }{{ name: "valid int, no ports", - 
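For illustration, the init-container annotation helpers in pkg/api/v1/pod/util.go above round-trip through the pod annotations; a minimal sketch (the container name, image, and the podutil import alias are invented for the example):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	// A pod whose spec declares a single init container.
	p := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{Name: "init", Image: "busybox"}},
		},
	}

	// Mirror the typed field into the alpha and beta annotations.
	if err := podutil.SetInitContainersAnnotations(p); err != nil {
		panic(err)
	}

	// A consumer that only understands the annotations can rebuild the typed field.
	p.Spec.InitContainers = nil
	if err := podutil.SetInitContainersAndStatuses(p); err != nil {
		panic(err)
	}
	fmt.Println(len(p.Spec.InitContainers)) // 1
}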
containers: []api.Container{{}}, + containers: []v1.Container{{}}, port: intstr.FromInt(93), expected: 93, pass: true, }, { name: "valid int, with ports", - containers: []api.Container{{Ports: []api.ContainerPort{{ + containers: []v1.Container{{Ports: []v1.ContainerPort{{ Name: "", ContainerPort: 11, Protocol: "TCP", @@ -52,13 +52,13 @@ func TestFindPort(t *testing.T) { pass: true, }, { name: "valid str, no ports", - containers: []api.Container{{}}, + containers: []v1.Container{{}}, port: intstr.FromString("p"), expected: 0, pass: false, }, { name: "valid str, one ctr with ports", - containers: []api.Container{{Ports: []api.ContainerPort{{ + containers: []v1.Container{{Ports: []v1.ContainerPort{{ Name: "", ContainerPort: 11, Protocol: "UDP", @@ -76,7 +76,7 @@ func TestFindPort(t *testing.T) { pass: true, }, { name: "valid str, two ctr with ports", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "", ContainerPort: 11, Protocol: "UDP", @@ -94,7 +94,7 @@ func TestFindPort(t *testing.T) { pass: true, }, { name: "valid str, two ctr with same port", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "", ContainerPort: 11, Protocol: "UDP", @@ -112,7 +112,7 @@ func TestFindPort(t *testing.T) { pass: true, }, { name: "valid str, invalid protocol", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "a", ContainerPort: 11, Protocol: "snmp", @@ -123,7 +123,7 @@ func TestFindPort(t *testing.T) { pass: false, }, { name: "valid hostPort", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "a", ContainerPort: 11, HostPort: 81, @@ -136,7 +136,7 @@ func TestFindPort(t *testing.T) { }, { name: "invalid hostPort", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "a", ContainerPort: 11, HostPort: -1, @@ -150,7 +150,7 @@ func TestFindPort(t *testing.T) { }, { name: "invalid ContainerPort", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "a", ContainerPort: -1, Protocol: "TCP", @@ -163,7 +163,7 @@ func TestFindPort(t *testing.T) { }, { name: "HostIP Address", - containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{ Name: "a", ContainerPort: 11, HostIP: "192.168.1.1", @@ -177,8 +177,8 @@ func TestFindPort(t *testing.T) { } for _, tc := range testCases { - port, err := FindPort(&api.Pod{Spec: api.PodSpec{Containers: tc.containers}}, - &api.ServicePort{Protocol: "TCP", TargetPort: tc.port}) + port, err := FindPort(&v1.Pod{Spec: v1.PodSpec{Containers: tc.containers}}, + &v1.ServicePort{Protocol: "TCP", TargetPort: tc.port}) if err != nil && tc.pass { t.Errorf("unexpected error for %s: %v", tc.name, err) } diff --git a/pkg/api/v1/ref.go b/pkg/api/v1/ref.go new file mode 100644 index 00000000000..f9427350a77 --- /dev/null +++ b/pkg/api/v1/ref.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "errors" + "fmt" + "k8s.io/kubernetes/pkg/api" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. + return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := api.Scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: ///* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. 
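A small usage sketch for the GetReference helper above (illustrative only; the pod name, namespace, and UID are made up, and TypeMeta is filled in explicitly so neither the scheme nor a selfLink lookup is needed):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		TypeMeta:   unversioned.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: v1.ObjectMeta{Name: "web-0", Namespace: "default", UID: "1234"},
	}

	ref, err := v1.GetReference(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Kind, ref.APIVersion, ref.Namespace+"/"+ref.Name) // Pod v1 default/web-0
}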
+func (obj *ObjectReference) SetGroupVersionKind(gvk unversioned.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *ObjectReference) GroupVersionKind() unversioned.GroupVersionKind {
+	return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj }
diff --git a/pkg/api/v1/resource_helpers.go b/pkg/api/v1/resource_helpers.go
new file mode 100644
index 00000000000..9b7b9d16498
--- /dev/null
+++ b/pkg/api/v1/resource_helpers.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+
+	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// Returns string version of ResourceName.
+func (self ResourceName) String() string {
+	return string(self)
+}
+
+// Returns the CPU limit if specified.
+func (self *ResourceList) Cpu() *resource.Quantity {
+	if val, ok := (*self)[ResourceCPU]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.DecimalSI}
+}
+
+// Returns the Memory limit if specified.
+func (self *ResourceList) Memory() *resource.Quantity {
+	if val, ok := (*self)[ResourceMemory]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
+func (self *ResourceList) Pods() *resource.Quantity {
+	if val, ok := (*self)[ResourcePods]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
+
+func (self *ResourceList) NvidiaGPU() *resource.Quantity {
+	if val, ok := (*self)[ResourceNvidiaGPU]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
+
+func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) {
+	for i := range statuses {
+		if statuses[i].Name == name {
+			return statuses[i], true
+		}
+	}
+	return ContainerStatus{}, false
+}
+
+func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus {
+	for i := range statuses {
+		if statuses[i].Name == name {
+			return statuses[i]
+		}
+	}
+	return ContainerStatus{}
+}
+
+// IsPodAvailable returns true if a pod is available; false otherwise.
+// Precondition for an available pod is that it must be ready. On top
+// of that, there are two cases when a pod can be considered available:
+// 1. minReadySeconds == 0, or
+// 2. LastTransitionTime (is set) + minReadySeconds < current time
+func IsPodAvailable(pod *Pod, minReadySeconds int32, now unversioned.Time) bool {
+	if !IsPodReady(pod) {
+		return false
+	}
+
+	c := GetPodReadyCondition(pod.Status)
+	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
+	if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
+		return true
+	}
+	return false
+}
+
+// IsPodReady returns true if a pod is ready; false otherwise.
+func IsPodReady(pod *Pod) bool {
+	return IsPodReadyConditionTrue(pod.Status)
+}
+
+// IsPodReadyConditionTrue returns true if a pod's Ready condition is true; false otherwise.
+func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = unversioned.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
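As a usage illustration for the condition helpers above, a minimal sketch (the zero minReadySeconds is an arbitrary choice for the example):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{}

	// UpdatePodCondition appends the Ready condition and stamps LastTransitionTime.
	changed := v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionTrue,
	})

	fmt.Println(changed)                                      // true
	fmt.Println(v1.IsPodReady(pod))                           // true
	fmt.Println(v1.IsPodAvailable(pod, 0, unversioned.Now())) // true, since minReadySeconds is 0
}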
+func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} diff --git a/pkg/api/v1/resource_helpers_test.go b/pkg/api/v1/resource_helpers_test.go new file mode 100644 index 00000000000..e6d7fe3552b --- /dev/null +++ b/pkg/api/v1/resource_helpers_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
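A short sketch of PodRequestsAndLimits from above (illustrative; the container names and quantities are invented). Requests of regular containers are summed, while init containers only raise the result to their maximum:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "app", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				}},
				{Name: "sidecar", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
				}},
			},
		},
	}

	reqs, _, err := v1.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 300m: requests of regular containers are summed
}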
+*/ + +package v1 + +import ( + "testing" + "time" + + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestResourceHelpers(t *testing.T) { + cpuLimit := resource.MustParse("10") + memoryLimit := resource.MustParse("10G") + resourceSpec := ResourceRequirements{ + Limits: ResourceList{ + "cpu": cpuLimit, + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 { + t.Errorf("expected cpulimit %v, got %v", cpuLimit, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } + resourceSpec = ResourceRequirements{ + Limits: ResourceList{ + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Value() != 0 { + t.Errorf("expected cpulimit %v, got %v", 0, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } +} + +func TestDefaultResourceHelpers(t *testing.T) { + resourceList := ResourceList{} + if resourceList.Cpu().Format != resource.DecimalSI { + t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format) + } + if resourceList.Memory().Format != resource.BinarySI { + t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format) + } +} + +func newPod(now unversioned.Time, ready bool, beforeSec int) *Pod { + conditionStatus := ConditionFalse + if ready { + conditionStatus = ConditionTrue + } + return &Pod{ + Status: PodStatus{ + Conditions: []PodCondition{ + { + Type: PodReady, + LastTransitionTime: unversioned.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)), + Status: conditionStatus, + }, + }, + }, + } +} + +func TestIsPodAvailable(t *testing.T) { + now := unversioned.Now() + tests := []struct { + pod *Pod + minReadySeconds int32 + expected bool + }{ + { + pod: newPod(now, false, 0), + minReadySeconds: 0, + expected: false, + }, + { + pod: newPod(now, true, 0), + minReadySeconds: 1, + expected: false, + }, + { + pod: newPod(now, true, 0), + minReadySeconds: 0, + expected: true, + }, + { + pod: newPod(now, true, 51), + minReadySeconds: 50, + expected: true, + }, + } + + for i, test := range tests { + isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now) + if isAvailable != test.expected { + t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable) + } + } +} diff --git a/pkg/api/v1/service/BUILD b/pkg/api/v1/service/BUILD new file mode 100644 index 00000000000..e97b7e1aaee --- /dev/null +++ b/pkg/api/v1/service/BUILD @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_binary", + "go_library", + "go_test", + "cgo_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "annotations.go", + "util.go", + ], + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/util/net/sets:go_default_library", + "//vendor:github.com/golang/glog", + ], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + library = "go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api:go_default_library", + "//pkg/util/net/sets:go_default_library", + ], +) diff --git a/pkg/api/v1/service/annotations.go b/pkg/api/v1/service/annotations.go new file mode 100644 index 00000000000..141e572124a --- /dev/null +++ 
b/pkg/api/v1/service/annotations.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api/v1" +) + +const ( + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour + AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The alpha annotations have been deprecated, remove them when we move this feature to GA. + + // AlphaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service + // If not specified, annotation is created by the service api backend with the allocated nodePort + // Will use user-specified nodePort value if specified by the client + AlphaAnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport" + + // AlphaAnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local + // endpoints only. This preserves Source IP and avoids a second hop. + AlphaAnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic" + + // BetaAnnotationHealthCheckNodePort is the beta version of AlphaAnnotationHealthCheckNodePort. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic is the beta version of AlphaAnnotationExternalTraffic. + BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) + +// NeedsHealthCheck Check service for health check annotations +func NeedsHealthCheck(service *v1.Service) bool { + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. 
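For illustration, a Service that opts into local-only external traffic via the beta annotations is picked up by NeedsHealthCheck above and by GetServiceHealthCheckNodePort defined just below (a sketch; the node port value is arbitrary):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		service.BetaAnnotationExternalTraffic:     service.AnnotationValueExternalTrafficLocal,
		service.BetaAnnotationHealthCheckNodePort: "31724",
	}

	fmt.Println(service.NeedsHealthCheck(svc))              // true
	fmt.Println(service.GetServiceHealthCheckNodePort(svc)) // 31724
}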
+ for _, annotation := range []string{AlphaAnnotationExternalTraffic, BetaAnnotationExternalTraffic} { + if l, ok := service.Annotations[annotation]; ok { + if l == AnnotationValueExternalTrafficLocal { + return true + } else if l == AnnotationValueExternalTrafficGlobal { + return false + } else { + glog.Errorf("Invalid value for annotation %v: %v", annotation, l) + } + } + } + return false +} + +// GetServiceHealthCheckNodePort Return health check node port annotation for service, if one exists +func GetServiceHealthCheckNodePort(service *v1.Service) int32 { + if !NeedsHealthCheck(service) { + return 0 + } + // First check the alpha annotation and then the beta. This is so existing + // Services continue to work till the user decides to transition to beta. + // If they transition to beta, there's no way to go back to alpha without + // rolling back the cluster. + for _, annotation := range []string{AlphaAnnotationHealthCheckNodePort, BetaAnnotationHealthCheckNodePort} { + if l, ok := service.Annotations[annotation]; ok { + p, err := strconv.Atoi(l) + if err != nil { + glog.Errorf("Failed to parse annotation %v: %v", annotation, err) + continue + } + return int32(p) + } + } + return 0 +} + +// GetServiceHealthCheckPathPort Return the path and nodePort programmed into the Cloud LB Health Check +func GetServiceHealthCheckPathPort(service *v1.Service) (string, int32) { + if !NeedsHealthCheck(service) { + return "", 0 + } + port := GetServiceHealthCheckNodePort(service) + if port == 0 { + return "", 0 + } + return "/healthz", port +} diff --git a/pkg/api/v1/service/util.go b/pkg/api/v1/service/util.go new file mode 100644 index 00000000000..3c00a495245 --- /dev/null +++ b/pkg/api/v1/service/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api/v1" + netsets "k8s.io/kubernetes/pkg/util/net/sets" +) + +const ( + defaultLoadBalancerSourceRanges = "0.0.0.0/0" +) + +// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 +func IsAllowAll(ipnets netsets.IPNet) bool { + for _, s := range ipnets.StringSlice() { + if s == "0.0.0.0/0" { + return true + } + } + return false +} + +// GetLoadBalancerSourceRanges first try to parse and verify LoadBalancerSourceRanges field from a service. +// If the field is not specified, turn to parse and verify the AnnotationLoadBalancerSourceRangesKey annotation from a service, +// extracting the source ranges to allow, and if not present returns a default (allow-all) value. +func GetLoadBalancerSourceRanges(service *v1.Service) (netsets.IPNet, error) { + var ipnets netsets.IPNet + var err error + // if SourceRange field is specified, ignore sourceRange annotation + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + specs := service.Spec.LoadBalancerSourceRanges + ipnets, err = netsets.ParseIPNets(specs...) + + if err != nil { + return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. 
Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) + } + } else { + val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] + val = strings.TrimSpace(val) + if val == "" { + val = defaultLoadBalancerSourceRanges + } + specs := strings.Split(val, ",") + ipnets, err = netsets.ParseIPNets(specs...) + if err != nil { + return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) + } + } + return ipnets, nil +} diff --git a/pkg/api/v1/service/util_test.go b/pkg/api/v1/service/util_test.go new file mode 100644 index 00000000000..28572f3c829 --- /dev/null +++ b/pkg/api/v1/service/util_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "testing" + + "strings" + + "k8s.io/kubernetes/pkg/api/v1" + netsets "k8s.io/kubernetes/pkg/util/net/sets" +) + +func TestGetLoadBalancerSourceRanges(t *testing.T) { + checkError := func(v string) { + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = v + svc := v1.Service{} + svc.Annotations = annotations + _, err := GetLoadBalancerSourceRanges(&svc) + if err == nil { + t.Errorf("Expected error parsing: %q", v) + } + svc = v1.Service{} + svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") + _, err = GetLoadBalancerSourceRanges(&svc) + if err == nil { + t.Errorf("Expected error parsing: %q", v) + } + } + checkError("10.0.0.1/33") + checkError("foo.bar") + checkError("10.0.0.1/32,*") + checkError("10.0.0.1/32,") + checkError("10.0.0.1/32, ") + checkError("10.0.0.1") + + checkOK := func(v string) netsets.IPNet { + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = v + svc := v1.Service{} + svc.Annotations = annotations + cidrs, err := GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error parsing: %q", v) + } + svc = v1.Service{} + svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") + cidrs, err = GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error parsing: %q", v) + } + return cidrs + } + cidrs := checkOK("192.168.0.1/32") + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice()) + } + cidrs = checkOK("192.168.0.1/32,192.168.0.1/32") + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR (after de-dup): %v", cidrs.StringSlice()) + } + cidrs = checkOK("192.168.0.1/32,192.168.0.2/32") + if len(cidrs) != 2 { + t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice()) + } + cidrs = checkOK(" 192.168.0.1/32 , 192.168.0.2/32 ") + if len(cidrs) != 2 { + t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice()) + } + // check LoadBalancerSourceRanges not specified + svc := v1.Service{} + cidrs, err := GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(cidrs) != 1 { + t.Errorf("Expected exactly one 
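A minimal sketch of GetLoadBalancerSourceRanges and IsAllowAll from above (illustrative; the CIDR values are examples only):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	svc := &v1.Service{}
	svc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/24", "192.168.2.0/24"}

	ipnets, err := service.GetLoadBalancerSourceRanges(svc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(ipnets), service.IsAllowAll(ipnets)) // 2 false

	// With neither the field nor the annotation set, the default is allow-all.
	ipnets, _ = service.GetLoadBalancerSourceRanges(&v1.Service{})
	fmt.Println(service.IsAllowAll(ipnets)) // true
}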
CIDR: %v", cidrs.StringSlice()) + } + if !IsAllowAll(cidrs) { + t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice()) + } + // check SourceRanges annotation is empty + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = "" + svc = v1.Service{} + svc.Annotations = annotations + cidrs, err = GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice()) + } + if !IsAllowAll(cidrs) { + t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice()) + } +} + +func TestAllowAll(t *testing.T) { + checkAllowAll := func(allowAll bool, cidrs ...string) { + ipnets, err := netsets.ParseIPNets(cidrs...) + if err != nil { + t.Errorf("Unexpected error parsing cidrs: %v", cidrs) + } + if allowAll != IsAllowAll(ipnets) { + t.Errorf("IsAllowAll did not return expected value for %v", cidrs) + } + } + checkAllowAll(false, "10.0.0.1/32") + checkAllowAll(false, "10.0.0.1/32", "10.0.0.2/32") + checkAllowAll(false, "10.0.0.1/32", "10.0.0.1/32") + + checkAllowAll(true, "0.0.0.0/0") + checkAllowAll(true, "192.168.0.0/0") + checkAllowAll(true, "192.168.0.1/32", "0.0.0.0/0") +} diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index cc8abfa958c..ae73d3e29d0 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -2995,6 +2995,8 @@ const ( NodeDiskPressure NodeConditionType = "DiskPressure" // NodeNetworkUnavailable means that network for the node is not correctly configured. NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeInodePressure means the kublet is under pressure due to insufficient available inodes. + NodeInodePressure NodeConditionType = "InodePressure" ) // NodeCondition contains condition information for a node. @@ -3790,6 +3792,35 @@ const ( // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets DockerConfigKey = ".dockercfg" + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" // SecretTypeTLS contains information about a TLS client or server secret. 
It // is primarily used with TLS termination of the Ingress resource, but may be // used in other types. @@ -4011,4 +4042,14 @@ type RangeAllocation struct { const ( // "default-scheduler" is the name of default scheduler. DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. + DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion ) diff --git a/pkg/api/v1/validation/validation.go b/pkg/api/v1/validation/validation.go new file mode 100644 index 00000000000..f6138a49fdf --- /dev/null +++ b/pkg/api/v1/validation/validation.go @@ -0,0 +1,159 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +const isNegativeErrorMsg string = `must be greater than or equal to 0` +const isNotIntegerErrorMsg string = `must be an integer` + +func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + limPath := fldPath.Child("limits") + reqPath := fldPath.Child("requests") + for resourceName, quantity := range requirements.Limits { + fldPath := limPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + // Check that request <= limit. + requestQuantity, exists := requirements.Requests[resourceName] + if exists { + // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
+ if resourceName == v1.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 { + allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", v1.ResourceNvidiaGPU))) + } else if quantity.Cmp(requestQuantity) < 0 { + allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName))) + } + } + } + for resourceName, quantity := range requirements.Requests { + fldPath := reqPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + } + + return allErrs +} + +func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + if len(strings.Split(value, "/")) == 1 { + if !api.IsStandardContainerResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) + } + } + return field.ErrorList{} +} + +// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource +func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) + if api.IsIntegerResourceName(resource) { + if value.MilliValue()%int64(1000) != int64(0) { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) + } + } + return allErrs +} + +// Validates that a Quantity is not negative +func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) + } + return allErrs +} + +// Validate compute resource typename. +// Refer to docs/design/resources.md for more details. 
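An illustrative sketch of ValidateResourceRequirements above (the "resources" field path label and the quantities are invented); a request larger than its limit should produce a field error:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/validation"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	// The CPU request exceeds the limit, so validation returns an error.
	req := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
		Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
	}

	for _, e := range validation.ValidateResourceRequirements(&req, field.NewPath("resources")) {
		fmt.Println(e)
	}
}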
+func validateResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !api.IsStandardResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) + } + } + + return field.ErrorList{} +} + +func ValidatePodLogOptions(opts *v1.PodLogOptions) field.ErrorList { + allErrs := field.ErrorList{} + if opts.TailLines != nil && *opts.TailLines < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) + } + if opts.LimitBytes != nil && *opts.LimitBytes < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) + } + switch { + case opts.SinceSeconds != nil && opts.SinceTime != nil: + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) + case opts.SinceSeconds != nil: + if *opts.SinceSeconds < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) + } + } + return allErrs +} + +func AccumulateUniqueHostPorts(containers []v1.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for ci, ctr := range containers { + idxPath := fldPath.Index(ci) + portsPath := idxPath.Child("ports") + for pi := range ctr.Ports { + idxPath := portsPath.Index(pi) + port := ctr.Ports[pi].HostPort + if port == 0 { + continue + } + str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) + if accumulator.Has(str) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) + } else { + accumulator.Insert(str) + } + } + } + return allErrs +} diff --git a/pkg/apis/rbac/v1alpha1/helpers.go b/pkg/apis/rbac/v1alpha1/helpers.go new file mode 100644 index 00000000000..f424e57b040 --- /dev/null +++ b/pkg/apis/rbac/v1alpha1/helpers.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/v1" +) + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) 
+ return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...) + return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. 
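For illustration, the rule builder above and the ClusterRoleBindingBuilder defined just below are meant to be chained when constructing policy in code (a sketch; the verbs, resource, role, and group names are made up):

package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// A rule allowing read access to pods in the core ("") API group.
	rule := rbacv1alpha1.NewRule("get", "list", "watch").
		Groups("").
		Resources("pods").
		RuleOrDie()
	fmt.Printf("%#v\n", rule)

	// Bind the "view" cluster role to the "oncall" group.
	binding := rbacv1alpha1.NewClusterBinding("view").
		Groups("oncall").
		BindingOrDie()
	fmt.Printf("%#v\n", binding)
}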
+type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: v1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/pkg/apis/rbac/v1alpha1/types.go b/pkg/apis/rbac/v1alpha1/types.go index 42617aca61f..9b336bb533d 100644 --- a/pkg/apis/rbac/v1alpha1/types.go +++ b/pkg/apis/rbac/v1alpha1/types.go @@ -27,6 +27,24 @@ import ( // 2. evaluation of RoleBindings in the namespace requested - short circuit on match // 3. deny by default +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + UserAll = "*" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { diff --git a/pkg/apis/storage/v1beta1/util/helpers.go b/pkg/apis/storage/v1beta1/util/helpers.go new file mode 100644 index 00000000000..3aa0aafc678 --- /dev/null +++ b/pkg/apis/storage/v1beta1/util/helpers.go @@ -0,0 +1,134 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package util
+
+import "k8s.io/kubernetes/pkg/api/v1"
+
+// IsDefaultStorageClassAnnotation represents a StorageClass annotation that
+// marks a class as the default StorageClass.
+//TODO: Update IsDefaultStorageClassAnnotation and remove Beta when no longer used
+const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
+const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
+
+// AlphaStorageClassAnnotation represents the previous alpha storage class
+// annotation. It's no longer used and is kept here for posterity.
+const AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class"
+
+// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+// It's currently still used and will be kept for backwards compatibility.
+const BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+// StorageClassAnnotation represents the storage class associated with a resource.
+// It currently matches the Beta value and can change when the official value is set.
+// - in PersistentVolumeClaim it represents the required class to match.
+// Only PersistentVolumes with the same class (i.e. annotation with the same
+// value) can be bound to the claim. If no such volume exists, the
+// controller will provision a new one using the StorageClass instance with
+// the same name as the annotation value.
+// - in PersistentVolume it represents the storage class to which the persistent
+// volume belongs.
+//TODO: Update this to the final annotation value as it matches BetaStorageClassAnnotation for now
+const StorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+// GetVolumeStorageClass returns the value of StorageClassAnnotation, or an empty
+// string if the annotation does not exist.
+// TODO: change to PersistentVolume.Spec.Class value when this attribute is
+// introduced.
+func GetVolumeStorageClass(volume *v1.PersistentVolume) string {
+ if class, found := volume.Annotations[StorageClassAnnotation]; found {
+ return class
+ }
+
+ // 'nil' is interpreted as "", i.e. the volume does not belong to any class.
+ return ""
+}
+
+// GetClaimStorageClass returns the name of the class requested by the given claim.
+// A request for a `nil` class is interpreted as a request for class "",
+// i.e. for a classless PV.
+// TODO: change to PersistentVolumeClaim.Spec.Class value when this
+// attribute is introduced.
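+// For example, a claim annotated with
+//   "volume.beta.kubernetes.io/storage-class": "gold"
+// would yield "gold" (the class name here is illustrative); an unannotated
+// claim yields "".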
+func GetClaimStorageClass(claim *v1.PersistentVolumeClaim) string {
+ if class, found := claim.Annotations[StorageClassAnnotation]; found {
+ return class
+ }
+
+ return ""
+}
+
+// GetStorageClassAnnotation returns the StorageClass value
+// if the annotation is set, or an empty string if not.
+// TODO: remove Alpha and Beta when no longer used or needed
+func GetStorageClassAnnotation(obj v1.ObjectMeta) string {
+ if class, ok := obj.Annotations[StorageClassAnnotation]; ok {
+ return class
+ }
+ if class, ok := obj.Annotations[BetaStorageClassAnnotation]; ok {
+ return class
+ }
+ if class, ok := obj.Annotations[AlphaStorageClassAnnotation]; ok {
+ return class
+ }
+
+ return ""
+}
+
+// HasStorageClassAnnotation returns true if
+// the annotation is set.
+// TODO: remove Alpha and Beta when no longer used or needed
+func HasStorageClassAnnotation(obj v1.ObjectMeta) bool {
+ if _, found := obj.Annotations[StorageClassAnnotation]; found {
+ return found
+ }
+ if _, found := obj.Annotations[BetaStorageClassAnnotation]; found {
+ return found
+ }
+ if _, found := obj.Annotations[AlphaStorageClassAnnotation]; found {
+ return found
+ }
+
+ return false
+}
+
+// IsDefaultAnnotationText returns a pretty Yes/No string if
+// the annotation is set.
+// TODO: remove Beta when no longer needed
+func IsDefaultAnnotationText(obj v1.ObjectMeta) string {
+ if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
+ return "Yes"
+ }
+ if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
+ return "Yes"
+ }
+
+ return "No"
+}
+
+// IsDefaultAnnotation returns true if
+// the annotation is set.
+// TODO: remove Beta when no longer needed
+func IsDefaultAnnotation(obj v1.ObjectMeta) bool {
+ if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
+ return true
+ }
+ if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
+ return true
+ }
+
+ return false
+}
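
For reference, a minimal sketch of how the storage-class helpers above might be consumed. This is illustrative only: the main wrapper, the PersistentVolume literal, the "pv-example" and "fast" names, and the storageutil alias are assumptions, not part of this patch.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
)

func main() {
	// A made-up PersistentVolume carrying the (currently beta-valued) class annotation.
	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pv-example",
			Annotations: map[string]string{
				storageutil.StorageClassAnnotation: "fast",
			},
		},
	}

	// GetVolumeStorageClass reads StorageClassAnnotation; prints "fast" here,
	// and would print "" for an unannotated volume.
	fmt.Println(storageutil.GetVolumeStorageClass(pv))

	// HasStorageClassAnnotation also falls back to the Beta and Alpha keys.
	fmt.Println(storageutil.HasStorageClassAnnotation(pv.ObjectMeta))
}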