Rename 'portal IP' to 'cluster IP' most everywhere

This covers obvious transforms, but not --portal_net, $PORTAL_NET and
similar.
Author: Tim Hockin
Date: 2015-05-23 13:41:11 -07:00
parent 46686616d4
commit 4318ca5a8b
43 changed files with 389 additions and 326 deletions
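For orientation, the heart of the change is a field rename with wire-format compatibility preserved by the conversion functions at the bottom of this diff: the internal and v1 field is now `ClusterIP` (JSON `clusterIP`), while v1beta3 and earlier keep `portalIP` on the wire. A minimal illustration — the struct names below are trimmed stand-ins, not the real API types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins for the renamed specs; only the JSON tags matter here.
type v1ServiceSpec struct {
	ClusterIP string `json:"clusterIP,omitempty"` // was PortalIP / "portalIP"
}

type v1beta3ServiceSpec struct {
	PortalIP string `json:"portalIP,omitempty"` // old wire name, kept for compatibility
}

func main() {
	ip := "10.0.0.11"
	v1, _ := json.Marshal(v1ServiceSpec{ClusterIP: ip})
	old, _ := json.Marshal(v1beta3ServiceSpec{PortalIP: ip})
	fmt.Println(string(v1))  // {"clusterIP":"10.0.0.11"}
	fmt.Println(string(old)) // {"portalIP":"10.0.0.11"}
}
```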

View File

@@ -213,7 +213,7 @@ func (ks *kube2sky) handleEndpointAdd(obj interface{}) {
 func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error {
     for i := range service.Spec.Ports {
-        b, err := json.Marshal(getSkyMsg(service.Spec.PortalIP, service.Spec.Ports[i].Port))
+        b, err := json.Marshal(getSkyMsg(service.Spec.ClusterIP, service.Spec.Ports[i].Port))
         if err != nil {
             return err
         }
@@ -229,7 +229,7 @@ func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
     if len(service.Spec.Ports) == 0 {
         glog.Fatalf("unexpected service with no ports: %v", service)
     }
-    // if PortalIP is not set, a DNS entry should not be created
+    // if ClusterIP is not set, a DNS entry should not be created
     if !kapi.IsServiceIPSet(service) {
         return ks.newHeadlessService(subdomain, service)
     }

View File

@@ -94,7 +94,7 @@ type hostPort struct {
 func getHostPort(service *kapi.Service) *hostPort {
     return &hostPort{
-        Host: service.Spec.PortalIP,
+        Host: service.Spec.ClusterIP,
         Port: service.Spec.Ports[0].Port,
     }
 }
@@ -134,7 +134,7 @@ func TestHeadlessService(t *testing.T) {
             Namespace: testNamespace,
         },
         Spec: kapi.ServiceSpec{
-            PortalIP: "None",
+            ClusterIP: "None",
             Ports: []kapi.ServicePort{
                 {Port: 80},
             },
@@ -187,7 +187,7 @@ func TestHeadlessServiceEndpointsUpdate(t *testing.T) {
             Namespace: testNamespace,
         },
         Spec: kapi.ServiceSpec{
-            PortalIP: "None",
+            ClusterIP: "None",
             Ports: []kapi.ServicePort{
                 {Port: 80},
             },
@@ -244,7 +244,7 @@ func TestHeadlessServiceWithDelayedEndpointsAddition(t *testing.T) {
             Namespace: testNamespace,
         },
         Spec: kapi.ServiceSpec{
-            PortalIP: "None",
+            ClusterIP: "None",
             Ports: []kapi.ServicePort{
                 {Port: 80},
             },
@@ -308,7 +308,7 @@ func TestAddSinglePortService(t *testing.T) {
                     Port: 80,
                 },
             },
-            PortalIP: "1.2.3.4",
+            ClusterIP: "1.2.3.4",
         },
     }
     k2s.newService(&service)
@@ -334,12 +334,12 @@ func TestUpdateSinglePortService(t *testing.T) {
                     Port: 80,
                 },
             },
-            PortalIP: "1.2.3.4",
+            ClusterIP: "1.2.3.4",
         },
     }
     k2s.newService(&service)
     assert.Len(t, ec.writes, 2)
-    service.Spec.PortalIP = "0.0.0.0"
+    service.Spec.ClusterIP = "0.0.0.0"
     k2s.newService(&service)
     expectedValue := getHostPort(&service)
     assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue)
@@ -363,7 +363,7 @@ func TestDeleteSinglePortService(t *testing.T) {
                     Port: 80,
                 },
             },
-            PortalIP: "1.2.3.4",
+            ClusterIP: "1.2.3.4",
         },
     }
     // Add the service

View File

@@ -23,7 +23,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
 export roles=("ai" "i" "i")
 # Define minion numbers
 export NUM_MINIONS=${NUM_MINIONS:-3}
-# define the IP range used for service portal.
+# define the IP range used for service cluster IPs.
 # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
 export PORTAL_NET=192.168.3.0/24
 # define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range

View File

@@ -85,8 +85,8 @@ type APIServer struct {
     OldEtcdPathPrefix      string
     CorsAllowedOriginList  util.StringList
     AllowPrivileged        bool
-    PortalNet              util.IPNet // TODO: make this a list
-    ServiceNodePorts       util.PortRange
+    ServiceClusterIPRange  util.IPNet // TODO: make this a list
+    ServiceNodePortRange   util.PortRange
     EnableLogsSupport      bool
     MasterServiceNamespace string
     RuntimeConfig          util.ConfigurationMap
@@ -183,8 +183,9 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&s.OldEtcdPathPrefix, "old-etcd-prefix", s.OldEtcdPathPrefix, "The previous prefix for all resource paths in etcd, if any.")
     fs.Var(&s.CorsAllowedOriginList, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.")
     fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.")
-    fs.Var(&s.PortalNet, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
-    fs.Var(&s.ServiceNodePorts, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.")
+    fs.Var(&s.ServiceClusterIPRange, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
+    fs.Var(&s.ServiceNodePortRange, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.")
     fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods")
     fs.Var(&s.RuntimeConfig, "runtime-config", "A set of key=value pairs that describe runtime configuration that may be passed to the apiserver. api/<version> key can be used to turn on/off specific api versions. api/all and api/legacy are special keys to control all and legacy api versions respectively.")
     client.BindKubeletClientConfigFlags(fs, &s.KubeletConfig)
@@ -196,9 +197,9 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
 }
 
 // TODO: Longer term we should read this from some config store, rather than a flag.
-func (s *APIServer) verifyPortalFlags() {
-    if s.PortalNet.IP == nil {
-        glog.Fatal("No --portal-net specified")
+func (s *APIServer) verifyClusterIPFlags() {
+    if s.ServiceClusterIPRange.IP == nil {
+        glog.Fatal("No --service-cluster-ip-range specified")
     }
 }
@@ -227,7 +228,7 @@ func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersi
 // Run runs the specified APIServer. This should never exit.
 func (s *APIServer) Run(_ []string) error {
-    s.verifyPortalFlags()
+    s.verifyClusterIPFlags()
     if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
         glog.Fatalf("specify either --etcd-servers or --etcd-config")
@@ -302,7 +303,7 @@ func (s *APIServer) Run(_ []string) error {
         }
     }
 
-    n := net.IPNet(s.PortalNet)
+    n := net.IPNet(s.ServiceClusterIPRange)
 
     // Default to the private server key for service account token signing
     if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
@@ -349,7 +350,7 @@ func (s *APIServer) Run(_ []string) error {
         EtcdHelper:            helper,
         EventTTL:              s.EventTTL,
         KubeletClient:         kubeletClient,
-        PortalNet:             &n,
+        ServiceClusterIPRange: &n,
         EnableCoreControllers: true,
         EnableLogsSupport:     s.EnableLogsSupport,
         EnableUISupport:       true,

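The `verifyClusterIPFlags` check above only asserts that a CIDR was parsed into the range. A standalone sketch of the same validation using only the standard library — the real code goes through `util.IPNet` with pflag, so the flag wiring here is illustrative, not the apiserver's:

```go
package main

import (
	"flag"
	"fmt"
	"net"
	"os"
)

func main() {
	// Stand-in for the apiserver flag; the commit keeps "portal-net" as the
	// flag name for now and only renames the field behind it.
	cidr := flag.String("service-cluster-ip-range", "", "CIDR range from which to assign service cluster IPs")
	flag.Parse()

	if *cidr == "" {
		fmt.Fprintln(os.Stderr, "No --service-cluster-ip-range specified")
		os.Exit(1)
	}
	_, ipnet, err := net.ParseCIDR(*cidr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "invalid CIDR %q: %v\n", *cidr, err)
		os.Exit(1)
	}
	fmt.Printf("services will be allocated cluster IPs from %s\n", ipnet)
}
```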
View File

@@ -83,7 +83,7 @@ We want to be able to assign IP addresses externally from Docker ([Docker issue
 In addition to enabling self-registration with 3rd-party discovery mechanisms, we'd like to setup DDNS automatically ([Issue #146](https://github.com/GoogleCloudPlatform/kubernetes/issues/146)). hostname, $HOSTNAME, etc. should return a name for the pod ([Issue #298](https://github.com/GoogleCloudPlatform/kubernetes/issues/298)), and gethostbyname should be able to resolve names of other pods. Probably we need to set up a DNS resolver to do the latter ([Docker issue #2267](https://github.com/dotcloud/docker/issues/2267)), so that we don't need to keep /etc/hosts files up to date dynamically.
 
-[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service portal IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service portal IP in DNS, and for that to become the preferred resolution protocol.
+[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service's IP in DNS, and for that to become the preferred resolution protocol.
 
 We'd also like to accommodate other load-balancing solutions (e.g., HAProxy), non-load-balanced services ([Issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260)), and other types of groups (worker pools, etc.). Providing the ability to Watch a label selector applied to pod addresses would enable efficient monitoring of group membership, which could be directly consumed or synced with a discovery mechanism. Event hooks ([Issue #140](https://github.com/GoogleCloudPlatform/kubernetes/issues/140)) for join/leave events would probably make this even easier.

View File

@@ -87,7 +87,7 @@ Some firewall software that uses iptables may not interact well with
 kubernetes. If you're having trouble around networking, try disabling any
 firewall or other iptables-using systems, first.
 
-By default the IP range for service portals is 10.0.*.* - depending on your
+By default the IP range for service cluster IPs is 10.0.*.* - depending on your
 docker installation, this may conflict with IPs for containers. If you find
 containers running with IPs in this range, edit hack/local-cluster-up.sh and
 change the portal_net flag to something else.

View File

@@ -235,7 +235,7 @@ $ mesos ps
 ```
 The number of Kubernetes pods listed earlier (from `bin/kubectl get pods`) should equal to the number active Mesos tasks listed the previous listing (`mesos ps`).
 
-Next, determine the internal IP address of the front end [service portal][8]:
+Next, determine the internal IP address of the front end [service][8]:
 
 ```bash
 $ bin/kubectl get services
@@ -268,14 +268,14 @@ Or interact with the frontend application via your browser, in 2 steps:
 First, open the firewall on the master machine.
 
 ```bash
-# determine the internal port for the frontend service portal
+# determine the internal port for the frontend service
 $ sudo iptables-save|grep -e frontend  # -- port 36336 in this case
 -A KUBE-PORTALS-CONTAINER -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
 -A KUBE-PORTALS-CONTAINER -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
 -A KUBE-PORTALS-HOST -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
 -A KUBE-PORTALS-HOST -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
 
-# open up access to the internal port for the frontend service portal
+# open up access to the internal port for the frontend service
 $ sudo iptables -A INPUT -i eth0 -p tcp -m state --state NEW,ESTABLISHED -m tcp \
   --dport ${internal_frontend_service_port} -j ACCEPT
 ```
@@ -297,7 +297,7 @@ Now, you can visit the guestbook in your browser!
 [5]: https://google.mesosphere.com
 [6]: http://mesosphere.com/docs/getting-started/cloud/google/mesosphere/#vpn-setup
 [7]: https://github.com/mesosphere/kubernetes-mesos/tree/v0.4.0/examples/guestbook
-[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-portals
+[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-vips
 [9]: mesos/k8s-firewall.png
 [10]: mesos/k8s-guestbook.png
 [11]: http://mesos.apache.org/

View File

@@ -135,7 +135,7 @@ The the kube-apiserver several options.
 	DEPRECATED: see --insecure-port instead
 
 **--portal-net**=<nil>
-	A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.
+	A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
 
 **--profiling**=true
 	Enable profiling via web interface host:port/debug/pprof/

View File

@@ -179,7 +179,7 @@ The the kube\-apiserver several options.
 .PP
 \fB\-\-portal\-net\fP=
-	A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.
+	A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
 
 .PP
 \fB\-\-profiling\fP=true

View File

@@ -42,7 +42,7 @@ applications will expose one or more network endpoints for clients to connect to
 balanced or situated behind a proxy - the data from those proxies and load balancers can be used to estimate client to
 server traffic for applications. This is the primary, but not sole, source of data for making decisions.
 
-Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-portals)
+Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-vips)
 running on each node directs service requests to the underlying implementation.
 While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage

View File

@@ -20,6 +20,58 @@ clustered database or key-value store. We will target such workloads for our
 ## v1 APIs
 
 For existing and future workloads, we want to provide a consistent, stable set of APIs, over which developers can build and extend Kubernetes. This includes input validation, a consistent API structure, clean semantics, and improved diagnosability of the system.
+||||||| merged common ancestors
+## APIs and core features
+1. Consistent v1 API
+  - Status: DONE. [v1beta3](http://kubernetesio.blogspot.com/2015/04/introducing-kubernetes-v1beta3.html) was developed as the release candidate for the v1 API.
+2. Multi-port services for apps which need more than one port on the same portal IP ([#1802](https://github.com/GoogleCloudPlatform/kubernetes/issues/1802))
+  - Status: DONE. Released in 0.15.0
+3. Nominal services for applications which need one stable IP per pod instance ([#260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260))
+  - Status: #2585 covers some design options.
+4. API input is scrubbed of status fields in favor of a new API to set status ([#4248](https://github.com/GoogleCloudPlatform/kubernetes/issues/4248))
+  - Status: DONE
+5. Input validation reporting versioned field names ([#3084](https://github.com/GoogleCloudPlatform/kubernetes/issues/3084))
+  - Status: in progress
+6. Error reporting: Report common problems in ways that users can discover
+  - Status:
+7. Event management: Make events usable and useful
+  - Status:
+8. Persistent storage support ([#5105](https://github.com/GoogleCloudPlatform/kubernetes/issues/5105))
+  - Status: in progress
+9. Allow nodes to join/leave a cluster ([#6087](https://github.com/GoogleCloudPlatform/kubernetes/issues/6087),[#3168](https://github.com/GoogleCloudPlatform/kubernetes/issues/3168))
+  - Status: in progress ([#6949](https://github.com/GoogleCloudPlatform/kubernetes/pull/6949))
+10. Handle node death
+  - Status: mostly covered by nodes joining/leaving a cluster
+11. Allow live cluster upgrades ([#6075](https://github.com/GoogleCloudPlatform/kubernetes/issues/6075),[#6079](https://github.com/GoogleCloudPlatform/kubernetes/issues/6079))
+  - Status: design in progress
+12. Allow kernel upgrades
+  - Status: mostly covered by nodes joining/leaving a cluster, need demonstration
+13. Allow rolling-updates to fail gracefully ([#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353))
+  - Status:
+14. Easy .dockercfg
+  - Status:
+15. Demonstrate cluster stability over time
+  - Status
+16. Kubelet use the kubernetes API to fetch jobs to run (instead of etcd) on supported platforms
+  - Status: DONE
+
+## Reliability and performance
+1. Restart system components in case of crash (#2884)
+  - Status: in progress
+2. Scale to 100 nodes (#3876)
+  - Status: in progress
+3. Scale to 30-50 pods (1-2 containers each) per node (#4188)
+  - Status:
+4. Scheduling throughput: 99% of scheduling decisions made in less than 1s on 100 node, 3000 pod cluster; linear time to number of nodes and pods (#3954)
+5. Startup time: 99% of end-to-end pod startup time with prepulled images is less than 5s on 100 node, 3000 pod cluster; linear time to number of nodes and pods (#3952, #3954)
+  - Status:
+6. API performance: 99% of API calls return in less than 1s; constant time to number of nodes and pods (#4521)
+  - Status:
+7. Manage and report disk space on nodes (#4135)
+  - Status: in progress
+8. API test coverage more than 85% in e2e tests
+  - Status:
 
 In addition, we will provide versioning and deprecation policies for the APIs.

View File

@@ -31,7 +31,7 @@ that is updated whenever the set of `Pods` in a `Service` changes. For
 non-native applications, Kubernetes offers a virtual-IP-based bridge to Services
 which redirects to the backend `Pods`.
 
-## Defining a Service
+## Defining a service
 
 A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the
 REST objects, a `Service` definition can be POSTed to the apiserver to create a
@@ -138,7 +138,7 @@ Accessing a `Service` without a selector works the same as if it had selector.
 The traffic will be routed to endpoints defined by the user (`1.2.3.4:80` in
 this example).
 
-## Portals and service proxies
+## Virtual IPs and service proxies
 
 Every node in a Kubernetes cluster runs a `kube-proxy`. This application
 watches the Kubernetes master for the addition and removal of `Service`
@@ -199,20 +199,22 @@ disambiguated. For example:
 }
 ```
 
-## Choosing your own PortalIP address
+## Choosing your own IP address
 
-A user can specify their own `PortalIP` address as part of a `Service` creation
-request. For example, if they already have an existing DNS entry that they
-wish to replace, or legacy systems that are configured for a specific IP
-address and difficult to re-configure. The `PortalIP` address that a user
+A user can specify their own cluster IP address as part of a `Service` creation
+request. To do this, set the `spec.clusterIP` field (called `portalIP` in
+v1beta3 and earlier APIs). For example, if they already have an existing DNS
+entry that they wish to replace, or legacy systems that are configured for a
+specific IP address and difficult to re-configure. The IP address that a user
 chooses must be a valid IP address and within the portal_net CIDR range that is
-specified by flag to the API server. If the PortalIP value is invalid, the
+specified by flag to the API server. If the IP address value is invalid, the
 apiserver returns a 422 HTTP status code to indicate that the value is invalid.
 
 ### Why not use round-robin DNS?
 
 A question that pops up every now and then is why we do all this stuff with
-portals rather than just use standard round-robin DNS. There are a few reasons:
+virtual IPs rather than just use standard round-robin DNS. There are a few
+reasons:
 
 * There is a long history of DNS libraries not respecting DNS TTLs and
   caching the results of name lookups.
@@ -221,7 +223,7 @@ portals rather than just use standard round-robin DNS. There are a few reasons:
   client re-resolving DNS over and over would be difficult to manage.
 
 We try to discourage users from doing things that hurt themselves. That said,
-if enough people ask for this, we may implement it as an alternative to portals.
+if enough people ask for this, we may implement it as an alternative.
 
 ## Discovering services
@@ -238,7 +240,7 @@ and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables,
 where the Service name is upper-cased and dashes are converted to underscores.
 
 For example, the Service "redis-master" which exposes TCP port 6379 and has been
-allocated portal IP address 10.0.0.11 produces the following environment
+allocated cluster IP address 10.0.0.11 produces the following environment
 variables:
 
 ```
@@ -272,24 +274,25 @@ cluster IP.
 We will soon add DNS support for multi-port `Service`s in the form of SRV
 records.
 
-## Headless Services
+## Headless services
 
-Sometimes you don't need or want a single service IP. In this case, you can
-create "headless" services by specifying `"None"` for the `PortalIP`. For such
-`Service`s, a cluster IP is not allocated and service-specific environment
-variables for `Pod`s are not created. DNS is configured to return multiple A
-records (addresses) for the `Service` name, which point directly to the `Pod`s
-backing the `Service`. Additionally, the kube proxy does not handle these
-services and there is no load balancing or proxying done by the platform for
-them. The endpoints controller will still create `Endpoints` records in the
-API.
+Sometimes you don't need or want load-balancing and a single service IP. In
+this case, you can create "headless" services by specifying `"None"` for the
+cluster IP (`spec.clusterIP` or `spec.portalIP` in v1beta3 and earlier APIs).
+For such `Service`s, a cluster IP is not allocated and service-specific
+environment variables for `Pod`s are not created. DNS is configured to return
+multiple A records (addresses) for the `Service` name, which point directly to
+the `Pod`s backing the `Service`. Additionally, the kube proxy does not handle
+these services and there is no load balancing or proxying done by the platform
+for them. The endpoints controller will still create `Endpoints` records in
+the API.
 
 This option allows developers to reduce coupling to the Kubernetes system, if
 they desire, but leaves them freedom to do discovery in their own way.
 Applications can still use a self-registration pattern and adapters for other
 discovery systems could easily be built upon this API.
 
-## External Services
+## External services
 
 For some parts of your application (e.g. frontends) you may want to expose a
 Service onto an external (outside of your cluster, maybe public internet) IP
@@ -366,7 +369,7 @@ though exactly how that works depends on the cloud provider.
 ## Shortcomings
 
-We expect that using iptables and userspace proxies for portals will work at
+We expect that using iptables and userspace proxies for VIPs will work at
 small to medium scale, but may not scale to very large clusters with thousands
 of Services. See [the original design proposal for
 portals](https://github.com/GoogleCloudPlatform/kubernetes/issues/1107) for more
@@ -387,7 +390,7 @@ but the current API requires it.
 In the future we envision that the proxy policy can become more nuanced than
 simple round robin balancing, for example master elected or sharded. We also
 envision that some `Services` will have "real" load balancers, in which case the
-portal will simply transport the packets there.
+VIP will simply transport the packets there.
 
 There's a
 [proposal](https://github.com/GoogleCloudPlatform/kubernetes/issues/3760) to
@@ -400,7 +403,7 @@ We intend to have first-class support for L7 (HTTP) `Service`s.
 We intend to have more flexible ingress modes for `Service`s which encompass
 the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more.
 
-## The gory details of portals
+## The gory details of virtual IPs
 
 The previous information should be sufficient for many people who just want to
 use `Services`. However, there is a lot going on behind the scenes that may be
@@ -427,26 +430,25 @@ of Kubernetes that used in memory locking) as well as checking for invalid
 assignments due to administrator intervention and cleaning up any any IPs
 that were allocated but which no service currently uses.
 
-### IPs and Portals
+### IPs and VIPs
 
 Unlike `Pod` IP addresses, which actually route to a fixed destination,
 `Service` IPs are not actually answered by a single host. Instead, we use
 `iptables` (packet processing logic in Linux) to define virtual IP addresses
-which are transparently redirected as needed. We call the tuple of the
-`Service` IP and the `Service` port the `portal`. When clients connect to the
-`portal`, their traffic is automatically transported to an appropriate
-endpoint. The environment variables and DNS for `Services` are actually
-populated in terms of the portal IP and port.
+which are transparently redirected as needed. When clients connect to the
+VIP, their traffic is automatically transported to an appropriate endpoint.
+The environment variables and DNS for `Services` are actually populated in
+terms of the `Service`'s VIP and port.
 
 As an example, consider the image processing application described above.
-When the backend `Service` is created, the Kubernetes master assigns a portal
+When the backend `Service` is created, the Kubernetes master assigns a virtual
 IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the
-portal is 10.0.0.1:1234. The master stores that information, which is then
-observed by all of the `kube-proxy` instances in the cluster. When a proxy
-sees a new portal, it opens a new random port, establishes an iptables redirect
-from the portal to this new port, and starts accepting connections on it.
+`Service` is observed by all of the `kube-proxy` instances in the cluster.
+When a proxy sees a new `Service`, it opens a new random port, establishes an
+iptables redirect from the VIP to this new port, and starts accepting
+connections on it.
 
-When a client connects to the portal the iptables rule kicks in, and redirects
+When a client connects to the VIP the iptables rule kicks in, and redirects
 the packets to the `Service proxy`'s own port. The `Service proxy` chooses a
 backend, and starts proxying traffic from the client to the backend.

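As an editorial aside, the userspace mechanism this section describes — accept on a local port that iptables points the VIP at, choose a backend, copy bytes both ways — fits in a few lines of Go. This is an illustration of the idea only, not kube-proxy's actual implementation; the backend addresses and listen port are made up:

```go
package main

import (
	"io"
	"log"
	"net"
)

func main() {
	// Hypothetical endpoints backing a Service; iptables would DNAT
	// VIP:port traffic to the listener below.
	backends := []string{"10.180.0.2:8080", "10.180.0.3:8080"}
	ln, err := net.Listen("tcp", "127.0.0.1:36336") // the proxy's "new random port"
	if err != nil {
		log.Fatal(err)
	}
	for next := 0; ; next = (next + 1) % len(backends) {
		client, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go proxy(client, backends[next]) // round robin: choose a backend, start proxying
	}
}

// proxy copies traffic between the client and one backend.
func proxy(client net.Conn, backend string) {
	defer client.Close()
	server, err := net.Dial("tcp", backend)
	if err != nil {
		return // a real proxy would retry another backend
	}
	defer server.Close()
	go io.Copy(server, client) // client -> backend
	io.Copy(client, server)    // backend -> client
}
```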
View File

@@ -136,7 +136,7 @@ _sticky sessions_. With Kubernetes you can scale out your app easily
 with session affinity. The [`meteor-service.json`](meteor-service.json) file contains
 `"sessionAffinity": "ClientIP"`, which provides this for us. See the
 [service
-documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#portals-and-service-proxies)
+documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#virtual-ips-and-service-proxies)
 for more information.
 
 As mentioned above, the mongo container uses a volume which is mapped

View File

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Verifies that services and portals work.
+# Verifies that services and virtual IPs work.
 
 set -o errexit
 set -o nounset
@@ -285,10 +285,10 @@ function verify_from_container() {
         fi
       done
       '")) \
-    || error "testing $1 portal from container failed"
+    || error "testing $1 VIP from container failed"
   found_pods=$(sort_args "${results[@]}")
   if [[ "${found_pods}" != "$5" ]]; then
-    error -e "$1 portal failed from container, expected:\n
+    error -e "$1 VIP failed from container, expected:\n
         $(printf '\t%s\n' $5)\n
         got:\n
         $(printf '\t%s\n' ${found_pods})
@@ -323,20 +323,20 @@ wait_for_pods "${svc2_name}" "${svc2_count}"
 svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}")
 svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}")
 
-# Get the portal IPs.
+# Get the VIP IPs.
 svc1_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc1_name}" --api-version=v1beta3)
 test -n "${svc1_ip}" || error "Service1 IP is blank"
 svc2_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc2_name}" --api-version=v1beta3)
 test -n "${svc2_ip}" || error "Service2 IP is blank"
 if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then
-  error "Portal IPs conflict: ${svc1_ip}"
+  error "VIPs conflict: ${svc1_ip}"
 fi
 
 #
-# Test 1: Prove that the service portal is alive.
+# Test 1: Prove that the service VIP is alive.
 #
-echo "Test 1: Prove that the service portal is alive."
-echo "Verifying the portals from the host"
+echo "Test 1: Prove that the service VIP is alive."
+echo "Verifying the VIP from the host"
 wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
     "${svc1_count}" "${svc1_pods}"
 for ip in ${svc1_publics}; do
@@ -345,7 +345,7 @@ for ip in ${svc1_publics}; do
 done
 wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
     "${svc2_count}" "${svc2_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIP from a container"
 verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
     "${svc1_count}" "${svc1_pods}"
 for ip in ${svc1_publics}; do
@@ -356,17 +356,17 @@ verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
     "${svc2_count}" "${svc2_pods}"
 
 #
-# Test 2: Bounce the proxy and make sure the portal comes back.
+# Test 2: Bounce the proxy and make sure the VIP comes back.
 #
-echo "Test 2: Bounce the proxy and make sure the portal comes back."
+echo "Test 2: Bounce the proxy and make sure the VIP comes back."
 echo "Restarting kube-proxy"
 restart-kube-proxy "${test_node}"
-echo "Verifying the portals from the host"
+echo "Verifying the VIP from the host"
 wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
     "${svc1_count}" "${svc1_pods}"
 wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
     "${svc2_count}" "${svc2_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIP from a container"
 verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
     "${svc1_count}" "${svc1_pods}"
 verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
@@ -395,14 +395,14 @@ wait_for_pods "${svc3_name}" "${svc3_count}"
 # Get the sorted lists of pods.
 svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}")
 
-# Get the portal IP.
+# Get the VIP.
 svc3_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc3_name}" --api-version=v1beta3)
 test -n "${svc3_ip}" || error "Service3 IP is blank"
 
-echo "Verifying the portals from the host"
+echo "Verifying the VIPs from the host"
 wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIPs from a container"
 verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
@@ -415,31 +415,31 @@ echo "Manually removing iptables rules"
 ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST || true"
 ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true"
 ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PROXY || true"
-echo "Verifying the portals from the host"
+echo "Verifying the VIPs from the host"
 wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIPs from a container"
 verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
 
 #
-# Test 6: Restart the master, make sure portals come back.
+# Test 6: Restart the master, make sure VIPs come back.
 #
-echo "Test 6: Restart the master, make sure portals come back."
+echo "Test 6: Restart the master, make sure VIPs come back."
 echo "Restarting the master"
 restart-apiserver "${master}"
 sleep 5
-echo "Verifying the portals from the host"
+echo "Verifying the VIPs from the host"
 wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIPs from a container"
 verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
     "${svc3_count}" "${svc3_pods}"
 
 #
-# Test 7: Bring up another service, make sure it does not re-use Portal IPs.
+# Test 7: Bring up another service, make sure it does not re-use IPs.
 #
-echo "Test 7: Bring up another service, make sure it does not re-use Portal IPs."
+echo "Test 7: Bring up another service, make sure it does not re-use IPs."
 svc4_name="service4"
 svc4_port=80
 svc4_count=3
@@ -451,17 +451,17 @@ wait_for_pods "${svc4_name}" "${svc4_count}"
 # Get the sorted lists of pods.
 svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}")
 
-# Get the portal IP.
+# Get the VIP.
 svc4_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc4_name}" --api-version=v1beta3)
 test -n "${svc4_ip}" || error "Service4 IP is blank"
 if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then
-  error "Portal IPs conflict: ${svc4_ip}"
+  error "VIPs conflict: ${svc4_ip}"
 fi
 
-echo "Verifying the portals from the host"
+echo "Verifying the VIPs from the host"
 wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
     "${svc4_count}" "${svc4_pods}"
-echo "Verifying the portals from a container"
+echo "Verifying the VIPs from a container"
 verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
     "${svc4_count}" "${svc4_pods}"

View File

@@ -18,6 +18,8 @@ package api
 // AUTO-GENERATED FUNCTIONS START HERE
 import (
+    "time"
+
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
@@ -25,7 +27,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "speter.net/go/exp/math/dec/inf"
-    "time"
 )
 
 func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
@@ -1928,7 +1929,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
     } else {
         out.Selector = nil
     }
-    out.PortalIP = in.PortalIP
+    out.ClusterIP = in.ClusterIP
     out.Type = in.Type
     if in.DeprecatedPublicIPs != nil {
         out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))

View File

@@ -99,15 +99,15 @@ func NewDeleteOptions(grace int64) *DeleteOptions {
     return &DeleteOptions{GracePeriodSeconds: &grace}
 }
 
-// this function aims to check if the service portal IP is set or not
+// this function aims to check if the service's ClusterIP is set or not
 // the objective is not to perform validation here
 func IsServiceIPSet(service *Service) bool {
-    return service.Spec.PortalIP != PortalIPNone && service.Spec.PortalIP != ""
+    return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != ""
 }
 
-// this function aims to check if the service portal IP is requested or not
+// this function aims to check if the service's cluster IP is requested or not
 func IsServiceIPRequested(service *Service) bool {
-    return service.Spec.PortalIP == ""
+    return service.Spec.ClusterIP == ""
 }
 
 var standardFinalizers = util.NewStringSet(

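The two helpers above encode three states for the renamed field: empty (an IP should be allocated), `"None"` (headless), and a concrete IP. A self-contained sketch of those semantics — plain strings stand in for `*Service` here:

```go
package main

import "fmt"

const ClusterIPNone = "None" // mirrors the renamed constant in pkg/api

// Stand-ins for IsServiceIPSet / IsServiceIPRequested above.
func isSet(clusterIP string) bool       { return clusterIP != ClusterIPNone && clusterIP != "" }
func isRequested(clusterIP string) bool { return clusterIP == "" }

func main() {
	for _, ip := range []string{"", "None", "10.0.0.11"} {
		// "" => the master should allocate an IP; "None" => headless service;
		// anything else => a concrete (user-chosen or allocated) cluster IP.
		fmt.Printf("clusterIP=%-12q set=%-5v requested=%v\n", ip, isSet(ip), isRequested(ip))
	}
}
```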
View File

@@ -77,10 +77,10 @@ func TestBeforeUpdate(t *testing.T) {
             expectErr: true,
         },
         {
-            name: "change portal IP",
+            name: "change ClusterIP",
             tweakSvc: func(oldSvc, newSvc *api.Service) {
-                oldSvc.Spec.PortalIP = "1.2.3.4"
-                newSvc.Spec.PortalIP = "4.3.2.1"
+                oldSvc.Spec.ClusterIP = "1.2.3.4"
+                newSvc.Spec.ClusterIP = "4.3.2.1"
             },
             expectErr: true,
         },

View File

@@ -1004,9 +1004,9 @@ type ReplicationControllerList struct {
 }
 
 const (
-    // PortalIPNone - do not assign a portal IP
+    // ClusterIPNone - do not assign a cluster IP
     // no proxying required and no environment variables should be created for pods
-    PortalIPNone = "None"
+    ClusterIPNone = "None"
 )
 
 // ServiceList holds a list of services.
@@ -1033,7 +1033,7 @@ type ServiceType string
 const (
     // ServiceTypeClusterIP means a service will only be accessible inside the
-    // cluster, via the portal IP.
+    // cluster, via the ClusterIP.
     ServiceTypeClusterIP ServiceType = "ClusterIP"
 
     // ServiceTypeNodePort means a service will be exposed on one port of
@@ -1082,12 +1082,12 @@ type ServiceSpec struct {
     // those endpoints.
     Selector map[string]string `json:"selector"`
 
-    // PortalIP is usually assigned by the master. If specified by the user
+    // ClusterIP is usually assigned by the master. If specified by the user
     // we will try to respect it or else fail the request. This field can
     // not be changed by updates.
     // Valid values are None, empty string (""), or a valid IP address
     // None can be specified for headless services when proxying is not required
-    PortalIP string `json:"portalIP,omitempty"`
+    ClusterIP string `json:"clusterIP,omitempty"`
 
     // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
     Type ServiceType `json:"type,omitempty"`

View File

@@ -2116,7 +2116,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
     } else {
         out.Selector = nil
     }
-    out.PortalIP = in.PortalIP
+    out.ClusterIP = in.ClusterIP
     out.Type = ServiceType(in.Type)
     if in.DeprecatedPublicIPs != nil {
         out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))
@@ -4391,7 +4391,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service
     } else {
         out.Selector = nil
     }
-    out.PortalIP = in.PortalIP
+    out.ClusterIP = in.ClusterIP
     out.Type = api.ServiceType(in.Type)
     if in.DeprecatedPublicIPs != nil {
         out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))

View File

@@ -18,13 +18,14 @@ package v1
 // AUTO-GENERATED FUNCTIONS START HERE
 import (
+    "time"
+
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "speter.net/go/exp/math/dec/inf"
-    "time"
 )
 
 func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
@@ -1864,7 +1865,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo
     } else {
         out.Selector = nil
     }
-    out.PortalIP = in.PortalIP
+    out.ClusterIP = in.ClusterIP
     out.Type = in.Type
     if in.DeprecatedPublicIPs != nil {
         out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))

View File

@ -1015,7 +1015,7 @@ type ServiceType string
const ( const (
// ServiceTypeClusterIP means a service will only be accessible inside the // ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP. // cluster, via the cluster IP.
ServiceTypeClusterIP ServiceType = "ClusterIP" ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of // ServiceTypeNodePort means a service will be exposed on one port of
@ -1062,12 +1062,12 @@ type ServiceSpec struct {
// This service will route traffic to pods having labels matching this selector. If null, no endpoints will be automatically created. If empty, all pods will be selected. // This service will route traffic to pods having labels matching this selector. If null, no endpoints will be automatically created. If empty, all pods will be selected.
Selector map[string]string `json:"selector,omitempty" description:"label keys and values that must match in order to receive traffic for this service; if empty, all pods are selected, if not specified, endpoints must be manually specified"` Selector map[string]string `json:"selector,omitempty" description:"label keys and values that must match in order to receive traffic for this service; if empty, all pods are selected, if not specified, endpoints must be manually specified"`
// PortalIP is usually assigned by the master. If specified by the user // ClusterIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can // we will try to respect it or else fail the request. This field can
// not be changed by updates. // not be changed by updates.
// Valid values are None, empty string (""), or a valid IP address // Valid values are None, empty string (""), or a valid IP address
// None can be specified for headless services when proxying is not required // None can be specified for headless services when proxying is not required
PortalIP string `json:"portalIP,omitempty" description:"IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"` ClusterIP string `json:"clusterIP,omitempty" description:"IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"` Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
@ -1120,9 +1120,9 @@ type Service struct {
} }
const ( const (
// PortalIPNone - do not assign a portal IP // ClusterIPNone - do not assign a cluster IP
// no proxying required and no environment variables should be created for pods // no proxying required and no environment variables should be created for pods
PortalIPNone = "None" ClusterIPNone = "None"
) )
// ServiceList holds a list of services. // ServiceList holds a list of services.
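
The renamed constant pairs with the ClusterIP field semantics documented above: empty string means "allocate one for me", "None" means headless, and anything else must be a concrete IP. A minimal sketch of helpers that classify the three cases, assuming the ServiceSpec type above (the real api.IsServiceIPSet/IsServiceIPRequested take a *Service, so these signatures are illustrative):

// Illustrative only; mirrors the intent of api.IsServiceIPRequested.
func isServiceIPRequested(spec *ServiceSpec) bool {
	// Empty string asks the master to allocate a cluster IP.
	return spec.ClusterIP == ""
}

// Illustrative only; mirrors the intent of api.IsServiceIPSet.
func isServiceIPSet(spec *ServiceSpec) bool {
	// A concrete IP is set; "None" (headless) and "" (to be allocated) are not.
	return spec.ClusterIP != ClusterIPNone && spec.ClusterIP != ""
}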

View File

@ -782,7 +782,7 @@ func addConversionFuncs() {
return err return err
} }
out.PublicIPs = in.Spec.DeprecatedPublicIPs out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP out.PortalIP = in.Spec.ClusterIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil { if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err return err
} }
@ -834,7 +834,7 @@ func addConversionFuncs() {
return err return err
} }
out.Spec.DeprecatedPublicIPs = in.PublicIPs out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP out.Spec.ClusterIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil { if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err return err
} }

View File

@ -704,7 +704,7 @@ func addConversionFuncs() {
return err return err
} }
out.PublicIPs = in.Spec.DeprecatedPublicIPs out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP out.PortalIP = in.Spec.ClusterIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil { if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err return err
} }
@ -756,7 +756,7 @@ func addConversionFuncs() {
return err return err
} }
out.Spec.DeprecatedPublicIPs = in.PublicIPs out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP out.Spec.ClusterIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil { if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err return err
} }

View File

@ -356,7 +356,7 @@ func convert_v1beta3_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Se
} else { } else {
out.Selector = nil out.Selector = nil
} }
out.PortalIP = in.PortalIP out.ClusterIP = in.PortalIP
typeIn := in.Type typeIn := in.Type
if typeIn == "" { if typeIn == "" {
@ -404,7 +404,7 @@ func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *Se
} else { } else {
out.Selector = nil out.Selector = nil
} }
out.PortalIP = in.PortalIP out.PortalIP = in.ClusterIP
if err := s.Convert(&in.Type, &out.Type, 0); err != nil { if err := s.Convert(&in.Type, &out.Type, 0); err != nil {
return err return err
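
Taken together with the v1beta1 and v1beta2 hunks above, these conversions keep the wire format stable: only the internal field is renamed, while every versioned API still serializes portalIP. A hypothetical round trip, eliding the conversion.Scope plumbing (the scope variable is assumed):

in := ServiceSpec{PortalIP: "10.0.0.42"} // versioned type: field name unchanged
out := api.ServiceSpec{}
_ = convert_v1beta3_ServiceSpec_To_api_ServiceSpec(&in, &out, scope)
// out.ClusterIP == "10.0.0.42"

back := ServiceSpec{}
_ = convert_api_ServiceSpec_To_v1beta3_ServiceSpec(&out, &back, scope)
// back.PortalIP == "10.0.0.42", so persisted objects keep reading "portalIP"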

View File

@ -1063,8 +1063,8 @@ func ValidateService(service *api.Service) errs.ValidationErrorList {
} }
if api.IsServiceIPSet(service) { if api.IsServiceIPSet(service) {
if ip := net.ParseIP(service.Spec.PortalIP); ip == nil { if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "portalIP should be empty, 'None', or a valid IP address")) allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "clusterIP should be empty, 'None', or a valid IP address"))
} }
} }
@ -1157,10 +1157,8 @@ func ValidateServiceUpdate(oldService, service *api.Service) errs.ValidationErro
allErrs := errs.ValidationErrorList{} allErrs := errs.ValidationErrorList{}
allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldService.ObjectMeta, &service.ObjectMeta).Prefix("metadata")...) allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldService.ObjectMeta, &service.ObjectMeta).Prefix("metadata")...)
// TODO: PortalIP should be a Status field, since the system can set a value != to the user's value if api.IsServiceIPSet(oldService) && service.Spec.ClusterIP != oldService.Spec.ClusterIP {
// once PortalIP is set, it cannot be unset. allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "field is immutable"))
if api.IsServiceIPSet(oldService) && service.Spec.PortalIP != oldService.Spec.PortalIP {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "field is immutable"))
} }
allErrs = append(allErrs, ValidateService(service)...) allErrs = append(allErrs, ValidateService(service)...)
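
The dropped TODO is replaced by the same rule under the new name: once a cluster IP is allocated it may be repeated verbatim on update, but never changed or cleared. A sketch of the behavior, assuming an otherwise valid pair of services (helper and values hypothetical):

oldSvc := validService()            // assume a helper returning a valid Service
oldSvc.Spec.ClusterIP = "10.0.0.42"

newSvc := oldSvc
newSvc.Spec.ClusterIP = "10.0.0.43"
// ValidateServiceUpdate(&oldSvc, &newSvc) now contains one
// "spec.clusterIP ... field is immutable" error.

newSvc.Spec.ClusterIP = ""
// Clearing an allocated cluster IP is also rejected as a change.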

View File

@ -1581,9 +1581,9 @@ func TestValidateService(t *testing.T) {
numErrs: 1, numErrs: 1,
}, },
{ {
name: "invalid portal ip", name: "invalid cluster ip",
tweakSvc: func(s *api.Service) { tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = "invalid" s.Spec.ClusterIP = "invalid"
}, },
numErrs: 1, numErrs: 1,
}, },
@ -1676,16 +1676,16 @@ func TestValidateService(t *testing.T) {
numErrs: 0, numErrs: 0,
}, },
{ {
name: "valid portal ip - none ", name: "valid cluster ip - none ",
tweakSvc: func(s *api.Service) { tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = "None" s.Spec.ClusterIP = "None"
}, },
numErrs: 0, numErrs: 0,
}, },
{ {
name: "valid portal ip - empty", name: "valid cluster ip - empty",
tweakSvc: func(s *api.Service) { tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = "" s.Spec.ClusterIP = ""
s.Spec.Ports[0].TargetPort = util.NewIntOrStringFromString("http") s.Spec.Ports[0].TargetPort = util.NewIntOrStringFromString("http")
}, },
numErrs: 0, numErrs: 0,
@ -2556,18 +2556,18 @@ func TestValidateServiceUpdate(t *testing.T) {
numErrs: 0, numErrs: 0,
}, },
{ {
name: "change portal IP", name: "change cluster IP",
tweakSvc: func(oldSvc, newSvc *api.Service) { tweakSvc: func(oldSvc, newSvc *api.Service) {
oldSvc.Spec.PortalIP = "1.2.3.4" oldSvc.Spec.ClusterIP = "1.2.3.4"
newSvc.Spec.PortalIP = "8.6.7.5" newSvc.Spec.ClusterIP = "8.6.7.5"
}, },
numErrs: 1, numErrs: 1,
}, },
{ {
name: "remove portal IP", name: "remove cluster IP",
tweakSvc: func(oldSvc, newSvc *api.Service) { tweakSvc: func(oldSvc, newSvc *api.Service) {
oldSvc.Spec.PortalIP = "1.2.3.4" oldSvc.Spec.ClusterIP = "1.2.3.4"
newSvc.Spec.PortalIP = "" newSvc.Spec.ClusterIP = ""
}, },
numErrs: 1, numErrs: 1,
}, },

View File

@ -506,7 +506,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api
fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(service.Labels)) fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(service.Labels))
fmt.Fprintf(out, "Selector:\t%s\n", formatLabels(service.Spec.Selector)) fmt.Fprintf(out, "Selector:\t%s\n", formatLabels(service.Spec.Selector))
fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type)
fmt.Fprintf(out, "IP:\t%s\n", service.Spec.PortalIP) fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP)
if len(service.Status.LoadBalancer.Ingress) > 0 { if len(service.Status.LoadBalancer.Ingress) > 0 {
list := buildIngressString(service.Status.LoadBalancer.Ingress) list := buildIngressString(service.Status.LoadBalancer.Ingress)
fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list) fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list)

View File

@ -553,7 +553,7 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool) error {
name = svc.Name name = svc.Name
} }
ips := []string{svc.Spec.PortalIP} ips := []string{svc.Spec.ClusterIP}
ingress := svc.Status.LoadBalancer.Ingress ingress := svc.Status.LoadBalancer.Ingress
for i := range ingress { for i := range ingress {

View File

@ -645,7 +645,7 @@ func TestPrintHumanReadableService(t *testing.T) {
tests := []api.Service{ tests := []api.Service{
{ {
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{ {
Port: 80, Port: 80,
@ -668,7 +668,7 @@ func TestPrintHumanReadableService(t *testing.T) {
}, },
{ {
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{ {
Port: 80, Port: 80,
@ -687,7 +687,7 @@ func TestPrintHumanReadableService(t *testing.T) {
}, },
{ {
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{ {
Port: 80, Port: 80,
@ -715,7 +715,7 @@ func TestPrintHumanReadableService(t *testing.T) {
}, },
{ {
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{ {
Port: 80, Port: 80,
@ -754,9 +754,9 @@ func TestPrintHumanReadableService(t *testing.T) {
buff := bytes.Buffer{} buff := bytes.Buffer{}
printService(&svc, &buff, false) printService(&svc, &buff, false)
output := string(buff.Bytes()) output := string(buff.Bytes())
ip := svc.Spec.PortalIP ip := svc.Spec.ClusterIP
if !strings.Contains(output, ip) { if !strings.Contains(output, ip) {
t.Errorf("expected to contain portal ip %s, but doesn't: %s", ip, output) t.Errorf("expected to contain ClusterIP %s, but doesn't: %s", ip, output)
} }
for _, ingress := range svc.Status.LoadBalancer.Ingress { for _, ingress := range svc.Status.LoadBalancer.Ingress {
@ -772,7 +772,7 @@ func TestPrintHumanReadableService(t *testing.T) {
t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output) t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output)
} }
} }
// Max of # ports and (# public ip + portal ip) // Max of # ports and (# public ip + cluster ip)
count := len(svc.Spec.Ports) count := len(svc.Spec.Ports)
if len(svc.Status.LoadBalancer.Ingress)+1 > count { if len(svc.Status.LoadBalancer.Ingress)+1 > count {
count = len(svc.Status.LoadBalancer.Ingress) + 1 count = len(svc.Status.LoadBalancer.Ingress) + 1
@ -932,7 +932,7 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) {
obj: &api.Service{ obj: &api.Service{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{ {
Port: 80, Port: 80,

View File

@ -32,16 +32,16 @@ func FromServices(services *api.ServiceList) []api.EnvVar {
for i := range services.Items { for i := range services.Items {
service := &services.Items[i] service := &services.Items[i]
// ignore services where PortalIP is "None" or empty // ignore services where ClusterIP is "None" or empty
// the services passed to this method should be pre-filtered // the services passed to this method should be pre-filtered
// only services that have the portal IP set should be included here // only services that have the cluster IP set should be included here
if !api.IsServiceIPSet(service) { if !api.IsServiceIPSet(service) {
continue continue
} }
// Host // Host
name := makeEnvVariableName(service.Name) + "_SERVICE_HOST" name := makeEnvVariableName(service.Name) + "_SERVICE_HOST"
result = append(result, api.EnvVar{Name: name, Value: service.Spec.PortalIP}) result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP})
// First port - give it the backwards-compatible name // First port - give it the backwards-compatible name
name = makeEnvVariableName(service.Name) + "_SERVICE_PORT" name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)}) result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)})
@ -81,14 +81,14 @@ func makeLinkVariables(service *api.Service) []api.EnvVar {
// Docker special-cases the first port. // Docker special-cases the first port.
all = append(all, api.EnvVar{ all = append(all, api.EnvVar{
Name: prefix + "_PORT", Name: prefix + "_PORT",
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port), Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
}) })
} }
portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol)) portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol))
all = append(all, []api.EnvVar{ all = append(all, []api.EnvVar{
{ {
Name: portPrefix, Name: portPrefix,
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port), Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
}, },
{ {
Name: portPrefix + "_PROTO", Name: portPrefix + "_PROTO",
@ -100,7 +100,7 @@ func makeLinkVariables(service *api.Service) []api.EnvVar {
}, },
{ {
Name: portPrefix + "_ADDR", Name: portPrefix + "_ADDR",
Value: service.Spec.PortalIP, Value: service.Spec.ClusterIP,
}, },
}...) }...)
} }
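
Concretely, for a hypothetical service named redis-master with cluster IP 10.0.0.11 exposing a single TCP port 6379, the generators above produce the same Docker-link-style variables as before the rename:

REDIS_MASTER_SERVICE_HOST=10.0.0.11
REDIS_MASTER_SERVICE_PORT=6379
REDIS_MASTER_PORT=tcp://10.0.0.11:6379
REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379
REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_PORT=6379
REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11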

View File

@ -31,7 +31,7 @@ func TestFromServices(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "foo-bar"}, ObjectMeta: api.ObjectMeta{Name: "foo-bar"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Port: 8080, Protocol: "TCP"}, {Port: 8080, Protocol: "TCP"},
}, },
@ -41,7 +41,7 @@ func TestFromServices(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "abc-123"}, ObjectMeta: api.ObjectMeta{Name: "abc-123"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "5.6.7.8", ClusterIP: "5.6.7.8",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Name: "u-d-p", Port: 8081, Protocol: "UDP"}, {Name: "u-d-p", Port: 8081, Protocol: "UDP"},
{Name: "t-c-p", Port: 8081, Protocol: "TCP"}, {Name: "t-c-p", Port: 8081, Protocol: "TCP"},
@ -52,7 +52,7 @@ func TestFromServices(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"}, ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "9.8.7.6", ClusterIP: "9.8.7.6",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"}, {Port: 8082, Protocol: "TCP"},
{Name: "8083", Port: 8083, Protocol: "TCP"}, {Name: "8083", Port: 8083, Protocol: "TCP"},
@ -60,20 +60,20 @@ func TestFromServices(t *testing.T) {
}, },
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-none"}, ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "None", ClusterIP: "None",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"}, {Port: 8082, Protocol: "TCP"},
}, },
}, },
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-empty"}, ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "", ClusterIP: "",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"}, {Port: 8082, Protocol: "TCP"},
}, },

View File

@ -885,7 +885,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
// project the services in namespace ns onto the master services // project the services in namespace ns onto the master services
for _, service := range services.Items { for _, service := range services.Items {
// ignore services where PortalIP is "None" or empty // ignore services where ClusterIP is "None" or empty
if !api.IsServiceIPSet(&service) { if !api.IsServiceIPSet(&service) {
continue continue
} }

View File

@ -1260,7 +1260,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8081, Port: 8081,
}}, }},
PortalIP: "1.2.3.1", ClusterIP: "1.2.3.1",
}, },
}, },
{ {
@ -1270,7 +1270,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8083, Port: 8083,
}}, }},
PortalIP: "1.2.3.3", ClusterIP: "1.2.3.3",
}, },
}, },
{ {
@ -1280,7 +1280,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8084, Port: 8084,
}}, }},
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
}, },
}, },
{ {
@ -1290,7 +1290,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8085, Port: 8085,
}}, }},
PortalIP: "1.2.3.5", ClusterIP: "1.2.3.5",
}, },
}, },
{ {
@ -1300,7 +1300,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8085, Port: 8085,
}}, }},
PortalIP: "None", ClusterIP: "None",
}, },
}, },
{ {
@ -1319,7 +1319,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8086, Port: 8086,
}}, }},
PortalIP: "1.2.3.6", ClusterIP: "1.2.3.6",
}, },
}, },
{ {
@ -1329,7 +1329,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8088, Port: 8088,
}}, }},
PortalIP: "1.2.3.8", ClusterIP: "1.2.3.8",
}, },
}, },
{ {
@ -1339,7 +1339,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8088, Port: 8088,
}}, }},
PortalIP: "None", ClusterIP: "None",
}, },
}, },
{ {
@ -1349,7 +1349,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
Port: 8088, Port: 8088,
}}, }},
PortalIP: "", ClusterIP: "",
}, },
}, },
} }

View File

@ -36,21 +36,22 @@ import (
// Controller is the controller manager for the core bootstrap Kubernetes controller // Controller is the controller manager for the core bootstrap Kubernetes controller
// loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default" // loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default"
// namespace, and provide the IP repair check on service PortalIPs // namespace, and provide the IP repair check on service IPs
type Controller struct { type Controller struct {
NamespaceRegistry namespace.Registry NamespaceRegistry namespace.Registry
ServiceRegistry service.Registry ServiceRegistry service.Registry
ServiceIPRegistry service.RangeRegistry
EndpointRegistry endpoint.Registry
PortalNet *net.IPNet
// TODO: MasterCount is yucky // TODO: MasterCount is yucky
MasterCount int MasterCount int
ServiceClusterIPRegistry service.RangeRegistry
ServiceClusterIPInterval time.Duration
ServiceClusterIPRange *net.IPNet
ServiceNodePortRegistry service.RangeRegistry ServiceNodePortRegistry service.RangeRegistry
ServiceNodePortInterval time.Duration ServiceNodePortInterval time.Duration
ServiceNodePorts util.PortRange ServiceNodePortRange util.PortRange
PortalIPInterval time.Duration EndpointRegistry endpoint.Registry
EndpointInterval time.Duration EndpointInterval time.Duration
PublicIP net.IP PublicIP net.IP
@ -73,11 +74,11 @@ func (c *Controller) Start() {
return return
} }
repairPortals := servicecontroller.NewRepair(c.PortalIPInterval, c.ServiceRegistry, c.PortalNet, c.ServiceIPRegistry) repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceRegistry, c.ServiceClusterIPRange, c.ServiceClusterIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePorts, c.ServiceNodePortRegistry) repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePortRange, c.ServiceNodePortRegistry)
// run all of the controllers once prior to returning from Start. // run all of the controllers once prior to returning from Start.
if err := repairPortals.RunOnce(); err != nil { if err := repairClusterIPs.RunOnce(); err != nil {
glog.Errorf("Unable to perform initial IP allocation check: %v", err) glog.Errorf("Unable to perform initial IP allocation check: %v", err)
} }
if err := repairNodePorts.RunOnce(); err != nil { if err := repairNodePorts.RunOnce(); err != nil {
@ -90,7 +91,7 @@ func (c *Controller) Start() {
glog.Errorf("Unable to perform initial Kubernetes RO service initialization: %v", err) glog.Errorf("Unable to perform initial Kubernetes RO service initialization: %v", err)
} }
c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairPortals.RunUntil, repairNodePorts.RunUntil) c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil)
c.runner.Start() c.runner.Start()
} }
@ -189,7 +190,7 @@ func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP n
Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
// maintained by this code, not by the pod selector // maintained by this code, not by the pod selector
Selector: nil, Selector: nil,
PortalIP: serviceIP.String(), ClusterIP: serviceIP.String(),
SessionAffinity: api.ServiceAffinityNone, SessionAffinity: api.ServiceAffinityNone,
}, },
} }

View File

@ -85,7 +85,6 @@ type Config struct {
EventTTL time.Duration EventTTL time.Duration
MinionRegexp string MinionRegexp string
KubeletClient client.KubeletClient KubeletClient client.KubeletClient
PortalNet *net.IPNet
// allow downstream consumers to disable the core controller loops // allow downstream consumers to disable the core controller loops
EnableCoreControllers bool EnableCoreControllers bool
EnableLogsSupport bool EnableLogsSupport bool
@ -142,15 +141,18 @@ type Config struct {
// The name of the cluster. // The name of the cluster.
ClusterName string ClusterName string
// The range of IPs to be assigned to services with type=ClusterIP or greater
ServiceClusterIPRange *net.IPNet
// The range of ports to be assigned to services with type=NodePort or greater // The range of ports to be assigned to services with type=NodePort or greater
ServiceNodePorts util.PortRange ServiceNodePortRange util.PortRange
} }
// Master contains state for a Kubernetes cluster master/api server. // Master contains state for a Kubernetes cluster master/api server.
type Master struct { type Master struct {
// "Inputs", Copied from Config // "Inputs", Copied from Config
portalNet *net.IPNet serviceClusterIPRange *net.IPNet
serviceNodePorts util.PortRange serviceNodePortRange util.PortRange
cacheTimeout time.Duration cacheTimeout time.Duration
mux apiserver.Mux mux apiserver.Mux
@ -196,7 +198,7 @@ type Master struct {
namespaceRegistry namespace.Registry namespaceRegistry namespace.Registry
serviceRegistry service.Registry serviceRegistry service.Registry
endpointRegistry endpoint.Registry endpointRegistry endpoint.Registry
portalAllocator service.RangeRegistry serviceClusterIPAllocator service.RangeRegistry
serviceNodePortAllocator service.RangeRegistry serviceNodePortAllocator service.RangeRegistry
// "Outputs" // "Outputs"
@ -219,26 +221,26 @@ func NewEtcdHelper(client tools.EtcdGetSet, version string, prefix string) (help
// setDefaults fills in any fields not set that are required to have valid data. // setDefaults fills in any fields not set that are required to have valid data.
func setDefaults(c *Config) { func setDefaults(c *Config) {
if c.PortalNet == nil { if c.ServiceClusterIPRange == nil {
defaultNet := "10.0.0.0/24" defaultNet := "10.0.0.0/24"
glog.Warningf("Portal net unspecified. Defaulting to %v.", defaultNet) glog.Warningf("Network range for service cluster IPs is unspecified. Defaulting to %v.", defaultNet)
_, portalNet, err := net.ParseCIDR(defaultNet) _, serviceClusterIPRange, err := net.ParseCIDR(defaultNet)
if err != nil { if err != nil {
glog.Fatalf("Unable to parse CIDR: %v", err) glog.Fatalf("Unable to parse CIDR: %v", err)
} }
if size := ipallocator.RangeSize(portalNet); size < 8 { if size := ipallocator.RangeSize(serviceClusterIPRange); size < 8 {
glog.Fatalf("The portal net range must be at least %d IP addresses", 8) glog.Fatalf("The service cluster IP range must be at least %d IP addresses", 8)
} }
c.PortalNet = portalNet c.ServiceClusterIPRange = serviceClusterIPRange
} }
if c.ServiceNodePorts.Size == 0 { if c.ServiceNodePortRange.Size == 0 {
// TODO: Currently no way to specify an empty range (do we need to allow this?) // TODO: Currently no way to specify an empty range (do we need to allow this?)
// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE) // We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
// but then that breaks the strict nestedness of ServiceType. // but then that breaks the strict nestedness of ServiceType.
// Review post-v1 // Review post-v1
defaultServiceNodePorts := util.PortRange{Base: 30000, Size: 2767} defaultServiceNodePortRange := util.PortRange{Base: 30000, Size: 2767}
c.ServiceNodePorts = defaultServiceNodePorts c.ServiceNodePortRange = defaultServiceNodePortRange
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePorts) glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePortRange)
} }
if c.MasterCount == 0 { if c.MasterCount == 0 {
// Clearly, there will be at least one master. // Clearly, there will be at least one master.
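
A small standalone sketch of what these defaults amount to, using only the stdlib (ipallocator.RangeSize computes essentially the address count below):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Default service cluster IP range from setDefaults above.
	_, cidr, err := net.ParseCIDR("10.0.0.0/24")
	if err != nil {
		panic(err)
	}
	ones, bits := cidr.Mask.Size()
	fmt.Println(1 << uint(bits-ones)) // 256 addresses, comfortably above the minimum of 8

	// Default node port range: Base 30000, Size 2767.
	base, size := 30000, 2767
	fmt.Printf("node ports %d-%d\n", base, base+size-1) // 30000-32766
}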
@ -273,8 +275,8 @@ func setDefaults(c *Config) {
// New returns a new instance of Master from the given config. // New returns a new instance of Master from the given config.
// Certain config fields will be set to a default value if unset, // Certain config fields will be set to a default value if unset,
// including: // including:
// PortalNet // ServiceClusterIPRange
// ServiceNodePorts // ServiceNodePortRange
// MasterCount // MasterCount
// ReadOnlyPort // ReadOnlyPort
// ReadWritePort // ReadWritePort
@ -301,20 +303,20 @@ func New(c *Config) *Master {
glog.Fatalf("master.New() called with config.KubeletClient == nil") glog.Fatalf("master.New() called with config.KubeletClient == nil")
} }
// Select the first two valid IPs from portalNet to use as the master service portalIPs // Select the first two valid IPs from serviceClusterIPRange to use as the master service IPs
serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.PortalNet, 1) serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 1)
if err != nil { if err != nil {
glog.Fatalf("Failed to generate service read-only IP for master service: %v", err) glog.Fatalf("Failed to generate service read-only IP for master service: %v", err)
} }
serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.PortalNet, 2) serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 2)
if err != nil { if err != nil {
glog.Fatalf("Failed to generate service read-write IP for master service: %v", err) glog.Fatalf("Failed to generate service read-write IP for master service: %v", err)
} }
glog.V(4).Infof("Setting master service IPs based on PortalNet subnet to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP) glog.V(4).Infof("Setting master service IPs based to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP)
m := &Master{ m := &Master{
portalNet: c.PortalNet, serviceClusterIPRange: c.ServiceClusterIPRange,
serviceNodePorts: c.ServiceNodePorts, serviceNodePortRange: c.ServiceNodePortRange,
rootWebService: new(restful.WebService), rootWebService: new(restful.WebService),
enableCoreControllers: c.EnableCoreControllers, enableCoreControllers: c.EnableCoreControllers,
enableLogsSupport: c.EnableLogsSupport, enableLogsSupport: c.EnableLogsSupport,
@ -440,17 +442,17 @@ func (m *Master) init(c *Config) {
registry := etcd.NewRegistry(c.EtcdHelper, podRegistry, m.endpointRegistry) registry := etcd.NewRegistry(c.EtcdHelper, podRegistry, m.endpointRegistry)
m.serviceRegistry = registry m.serviceRegistry = registry
var portalRangeRegistry service.RangeRegistry var serviceClusterIPRegistry service.RangeRegistry
portalAllocator := ipallocator.NewAllocatorCIDRRange(m.portalNet, func(max int, rangeSpec string) allocator.Interface { serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec) mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.EtcdHelper) etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.EtcdHelper)
portalRangeRegistry = etcd serviceClusterIPRegistry = etcd
return etcd return etcd
}) })
m.portalAllocator = portalRangeRegistry m.serviceClusterIPAllocator = serviceClusterIPRegistry
var serviceNodePortRegistry service.RangeRegistry var serviceNodePortRegistry service.RangeRegistry
serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePorts, func(max int, rangeSpec string) allocator.Interface { serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec) mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.EtcdHelper) etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.EtcdHelper)
serviceNodePortRegistry = etcd serviceNodePortRegistry = etcd
@ -474,7 +476,7 @@ func (m *Master) init(c *Config) {
"podTemplates": podTemplateStorage, "podTemplates": podTemplateStorage,
"replicationControllers": controllerStorage, "replicationControllers": controllerStorage,
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, portalAllocator, serviceNodePortAllocator, c.ClusterName), "services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, c.ClusterName),
"endpoints": endpointsStorage, "endpoints": endpointsStorage,
"minions": nodeStorage, "minions": nodeStorage,
"minions/status": nodeStatusStorage, "minions/status": nodeStatusStorage,
@ -612,18 +614,19 @@ func (m *Master) NewBootstrapController() *Controller {
return &Controller{ return &Controller{
NamespaceRegistry: m.namespaceRegistry, NamespaceRegistry: m.namespaceRegistry,
ServiceRegistry: m.serviceRegistry, ServiceRegistry: m.serviceRegistry,
ServiceIPRegistry: m.portalAllocator,
EndpointRegistry: m.endpointRegistry,
PortalNet: m.portalNet,
MasterCount: m.masterCount, MasterCount: m.masterCount,
ServiceNodePortRegistry: m.serviceNodePortAllocator, EndpointRegistry: m.endpointRegistry,
ServiceNodePorts: m.serviceNodePorts,
ServiceNodePortInterval: 3 * time.Minute,
PortalIPInterval: 3 * time.Minute,
EndpointInterval: 10 * time.Second, EndpointInterval: 10 * time.Second,
ServiceClusterIPRegistry: m.serviceClusterIPAllocator,
ServiceClusterIPRange: m.serviceClusterIPRange,
ServiceClusterIPInterval: 3 * time.Minute,
ServiceNodePortRegistry: m.serviceNodePortAllocator,
ServiceNodePortRange: m.serviceNodePortRange,
ServiceNodePortInterval: 3 * time.Minute,
PublicIP: m.clusterIP, PublicIP: m.clusterIP,
ServiceIP: m.serviceReadWriteIP, ServiceIP: m.serviceReadWriteIP,
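
The two master service IPs picked in New() are just the first and second usable addresses of the configured range. A simplified, IPv4-only stand-in for ipallocator.GetIndexedIP (the real function also handles larger ranges and returns errors):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// indexedIP returns the index'th address inside cidr; index 0 is the
// network address itself.
func indexedIP(cidr *net.IPNet, index uint32) net.IP {
	base := binary.BigEndian.Uint32(cidr.IP.To4())
	ip := make(net.IP, 4)
	binary.BigEndian.PutUint32(ip, base+index)
	return ip
}

func main() {
	_, cidr, _ := net.ParseCIDR("10.0.0.0/24")
	fmt.Println(indexedIP(cidr, 1)) // 10.0.0.1: read-only ("kubernetes-ro")
	fmt.Println(indexedIP(cidr, 2)) // 10.0.0.2: read-write ("kubernetes")
}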

View File

@ -33,9 +33,13 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
) )
type portal struct {
ip net.IP
port int
}
type serviceInfo struct { type serviceInfo struct {
portalIP net.IP portal portal
portalPort int
protocol api.Protocol protocol api.Protocol
proxyPort int proxyPort int
socket proxySocket socket proxySocket
@ -252,9 +256,9 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
for i := range services { for i := range services {
service := &services[i] service := &services[i]
// if PortalIP is "None" or empty, skip proxying // if ClusterIP is "None" or empty, skip proxying
if !api.IsServiceIPSet(service) { if !api.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to portal IP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.PortalIP) glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.ClusterIP)
continue continue
} }
@ -263,7 +267,7 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
serviceName := ServicePortName{types.NamespacedName{service.Namespace, service.Name}, servicePort.Name} serviceName := ServicePortName{types.NamespacedName{service.Namespace, service.Name}, servicePort.Name}
activeServices[serviceName] = true activeServices[serviceName] = true
serviceIP := net.ParseIP(service.Spec.PortalIP) serviceIP := net.ParseIP(service.Spec.ClusterIP)
info, exists := proxier.getServiceInfo(serviceName) info, exists := proxier.getServiceInfo(serviceName)
// TODO: check health of the socket? What if ProxyLoop exited? // TODO: check health of the socket? What if ProxyLoop exited?
if exists && sameConfig(info, service, servicePort) { if exists && sameConfig(info, service, servicePort) {
@ -287,8 +291,8 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
glog.Errorf("Failed to start proxy for %q: %v", serviceName, err) glog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
continue continue
} }
info.portalIP = serviceIP info.portal.ip = serviceIP
info.portalPort = servicePort.Port info.portal.port = servicePort.Port
info.deprecatedPublicIPs = service.Spec.DeprecatedPublicIPs info.deprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
// Deep-copy in case the service instance changes // Deep-copy in case the service instance changes
info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
@ -321,10 +325,10 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
} }
func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool { func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
if info.protocol != port.Protocol || info.portalPort != port.Port || info.nodePort != port.NodePort { if info.protocol != port.Protocol || info.portal.port != port.Port || info.nodePort != port.NodePort {
return false return false
} }
if !info.portalIP.Equal(net.ParseIP(service.Spec.PortalIP)) { if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) {
return false return false
} }
if !ipsEqual(info.deprecatedPublicIPs, service.Spec.DeprecatedPublicIPs) { if !ipsEqual(info.deprecatedPublicIPs, service.Spec.DeprecatedPublicIPs) {
@ -352,19 +356,19 @@ func ipsEqual(lhs, rhs []string) bool {
} }
func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) error { func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) error {
err := proxier.openOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) err := proxier.openOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil { if err != nil {
return err return err
} }
for _, publicIP := range info.deprecatedPublicIPs { for _, publicIP := range info.deprecatedPublicIPs {
err = proxier.openOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil { if err != nil {
return err return err
} }
} }
for _, ingress := range info.loadBalancerStatus.Ingress { for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" { if ingress.IP != "" {
err = proxier.openOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil { if err != nil {
return err return err
} }
@ -379,27 +383,27 @@ func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) e
return nil return nil
} }
func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error { func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error {
// Handle traffic from containers. // Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil { if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerPortalChain, name) glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerPortalChain, name)
return err return err
} }
if !existed { if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort) glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
} }
// Handle traffic from the host. // Handle traffic from the host.
args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil { if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
return err return err
} }
if !existed { if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort) glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
} }
return nil return nil
} }
@ -480,13 +484,13 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyI
func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) error { func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) error {
// Collect errors and report them all at the end. // Collect errors and report them all at the end.
el := proxier.closeOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, publicIP := range info.deprecatedPublicIPs { for _, publicIP := range info.deprecatedPublicIPs {
el = append(el, proxier.closeOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...) el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
} }
for _, ingress := range info.loadBalancerStatus.Ingress { for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" { if ingress.IP != "" {
el = append(el, proxier.closeOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...) el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
} }
} }
if info.nodePort != 0 { if info.nodePort != 0 {
@ -500,18 +504,18 @@ func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo)
return errors.NewAggregate(el) return errors.NewAggregate(el)
} }
func (proxier *Proxier) closeOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error { func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error {
el := []error{} el := []error{}
// Handle traffic from containers. // Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err) el = append(el, err)
} }
// Handle traffic from the host. // Handle traffic from the host.
args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err) el = append(el, err)
@ -577,7 +581,7 @@ func iptablesInit(ipt iptables.Interface) error {
// This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
// doubly-unlikely), but we need to be careful to keep the rules in the right order. // doubly-unlikely), but we need to be careful to keep the rules in the right order.
args := []string{ /* portal_net matching could go here */ } args := []string{ /* portal_net matching could go here */ }
args = append(args, "-m", "comment", "--comment", "handle Portals; NOTE: this must be before the NodePort rules") args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
return err return err
} }
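
To make the portal plumbing concrete: for a hypothetical cluster IP 10.0.0.11, TCP port 6379, node IP 10.240.0.5, and a userspace proxy listening on port 40000, openOnePortal installs nat-table entries along these lines (iptables-save style; the exact match arguments come from iptablesContainerPortalArgs/iptablesHostPortalArgs and may differ):

-A KUBE-PORTALS-CONTAINER -d 10.0.0.11/32 -p tcp -m comment --comment "default/redis-master:" --dport 6379 -j REDIRECT --to-ports 40000
-A KUBE-PORTALS-HOST -d 10.0.0.11/32 -p tcp -m comment --comment "default/redis-master:" --dport 6379 -j DNAT --to-destination 10.240.0.5:40000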

View File

@ -300,7 +300,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
@ -315,7 +315,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
if !exists { if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceP) t.Fatalf("can't find serviceInfo for %s", serviceP)
} }
if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 80 || svcInfo.protocol != "TCP" { if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo) t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
} }
@ -323,7 +323,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
if !exists { if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceQ) t.Fatalf("can't find serviceInfo for %s", serviceQ)
} }
if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 81 || svcInfo.protocol != "UDP" { if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo) t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
} }
@ -530,7 +530,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.proxyPort, Port: svcInfo.proxyPort,
Protocol: "TCP", Protocol: "TCP",
@ -582,7 +582,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.proxyPort, Port: svcInfo.proxyPort,
Protocol: "UDP", Protocol: "UDP",
@ -624,7 +624,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: 99, Port: 99,
Protocol: "TCP", Protocol: "TCP",
@ -671,7 +671,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: 99, Port: 99,
Protocol: "UDP", Protocol: "UDP",
@ -720,10 +720,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Ports: []api.ServicePort{{ Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.portalPort, Port: svcInfo.portal.port,
Protocol: "TCP", Protocol: "TCP",
}}, }},
PortalIP: svcInfo.portalIP.String(), ClusterIP: svcInfo.portal.ip.String(),
DeprecatedPublicIPs: []string{"4.3.2.1"}, DeprecatedPublicIPs: []string{"4.3.2.1"},
}, },
}}) }})
@ -769,7 +769,7 @@ func TestProxyUpdatePortal(t *testing.T) {
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.proxyPort, Port: svcInfo.proxyPort,
Protocol: "TCP", Protocol: "TCP",
@ -777,12 +777,12 @@ func TestProxyUpdatePortal(t *testing.T) {
}}) }})
_, exists := p.getServiceInfo(service) _, exists := p.getServiceInfo(service)
if exists { if exists {
t.Fatalf("service with empty portalIP should not be included in the proxy") t.Fatalf("service with empty ClusterIP should not be included in the proxy")
} }
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "None", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.proxyPort, Port: svcInfo.proxyPort,
Protocol: "TCP", Protocol: "TCP",
@ -790,12 +790,12 @@ func TestProxyUpdatePortal(t *testing.T) {
}}) }})
_, exists = p.getServiceInfo(service) _, exists = p.getServiceInfo(service)
if exists { if exists {
t.Fatalf("service with 'None' as portalIP should not be included in the proxy") t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
} }
p.OnUpdate([]api.Service{{ p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p", Name: "p",
Port: svcInfo.proxyPort, Port: svcInfo.proxyPort,
Protocol: "TCP", Protocol: "TCP",
@ -803,7 +803,7 @@ func TestProxyUpdatePortal(t *testing.T) {
}}) }})
svcInfo, exists = p.getServiceInfo(service) svcInfo, exists = p.getServiceInfo(service)
if !exists { if !exists {
t.Fatalf("service with portalIP set not found in the proxy") t.Fatalf("service with ClusterIP set not found in the proxy")
} }
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1) waitForNumProxyLoops(t, p, 1)

View File

@ -27,17 +27,17 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
) )
// Repair is a controller loop that periodically examines all service PortalIP allocations // Repair is a controller loop that periodically examines all service ClusterIP allocations
// and logs any errors, and then sets the compacted and accurate list of all allocated IPs. // and logs any errors, and then sets the compacted and accurate list of all allocated IPs.
// //
// Handles: // Handles:
// * Duplicate PortalIP assignments caused by operator action or undetected race conditions // * Duplicate ClusterIP assignments caused by operator action or undetected race conditions
// * PortalIPs that do not match the current portal network // * ClusterIPs that do not match the currently configured range
// * Allocations to services that were not actually created due to a crash or power loss // * Allocations to services that were not actually created due to a crash or power loss
// * Migrates old versions of Kubernetes services into the atomic ipallocator model automatically // * Migrates old versions of Kubernetes services into the atomic ipallocator model automatically
// //
// Can be run at infrequent intervals, and is best performed on startup of the master. // Can be run at infrequent intervals, and is best performed on startup of the master.
// Is level driven and idempotent - all valid PortalIPs will be updated into the ipallocator // Is level driven and idempotent - all valid ClusterIPs will be updated into the ipallocator
// map at the end of a single execution loop if no race is encountered. // map at the end of a single execution loop if no race is encountered.
// //
// TODO: allocate new IPs if necessary // TODO: allocate new IPs if necessary
@ -49,7 +49,7 @@ type Repair struct {
alloc service.RangeRegistry alloc service.RangeRegistry
} }
// NewRepair creates a controller that periodically ensures that all portalIPs are uniquely allocated across the cluster // NewRepair creates a controller that periodically ensures that all clusterIPs are uniquely allocated across the cluster
// and generates informational warnings for a cluster that is not in sync. // and generates informational warnings for a cluster that is not in sync.
func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.RangeRegistry) *Repair { func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.RangeRegistry) *Repair {
return &Repair{ return &Repair{
@ -69,7 +69,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
}, c.interval, ch) }, c.interval, ch)
} }
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs. // RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error { func (c *Repair) RunOnce() error {
// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read, // TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
// or if they are executed against different leaders, // or if they are executed against different leaders,
@ -94,27 +94,27 @@ func (c *Repair) RunOnce() error {
if !api.IsServiceIPSet(&svc) { if !api.IsServiceIPSet(&svc) {
continue continue
} }
ip := net.ParseIP(svc.Spec.PortalIP) ip := net.ParseIP(svc.Spec.ClusterIP)
if ip == nil { if ip == nil {
// portal IP is broken, reallocate // cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace)) util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.ClusterIP, svc.Name, svc.Namespace))
continue continue
} }
switch err := r.Allocate(ip); err { switch err := r.Allocate(ip); err {
case nil: case nil:
case ipallocator.ErrAllocated: case ipallocator.ErrAllocated:
// TODO: send event // TODO: send event
// portal IP is broken, reallocate // cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace)) util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
case ipallocator.ErrNotInRange: case ipallocator.ErrNotInRange:
// TODO: send event // TODO: send event
// portal IP is broken, reallocate // cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network)) util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
case ipallocator.ErrFull: case ipallocator.ErrFull:
// TODO: send event // TODO: send event
return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services") return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services")
default: default:
return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err) return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
} }
} }
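
A minimal usage sketch, mirroring how the bootstrap controller drives this loop (the registry, range, and allocator variables are assumed to exist in scope):

repair := servicecontroller.NewRepair(3*time.Minute, serviceRegistry, serviceClusterIPRange, clusterIPRegistry)

// One synchronous pass at master startup surfaces leaks immediately...
if err := repair.RunOnce(); err != nil {
	glog.Errorf("Unable to perform initial IP allocation check: %v", err)
}

// ...then the check repeats in the background until stop is closed.
stop := make(chan struct{})
go repair.RunUntil(stop)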

View File

@ -121,22 +121,22 @@ func TestRepairWithExisting(t *testing.T) {
registry.List = api.ServiceList{ registry.List = api.ServiceList{
Items: []api.Service{ Items: []api.Service{
{ {
Spec: api.ServiceSpec{PortalIP: "192.168.1.1"}, Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
}, },
{ {
Spec: api.ServiceSpec{PortalIP: "192.168.1.100"}, Spec: api.ServiceSpec{ClusterIP: "192.168.1.100"},
}, },
{ // outside CIDR, will be dropped { // outside CIDR, will be dropped
Spec: api.ServiceSpec{PortalIP: "192.168.0.1"}, Spec: api.ServiceSpec{ClusterIP: "192.168.0.1"},
}, },
{ // empty, ignored { // empty, ignored
Spec: api.ServiceSpec{PortalIP: ""}, Spec: api.ServiceSpec{ClusterIP: ""},
}, },
{ // duplicate, dropped { // duplicate, dropped
Spec: api.ServiceSpec{PortalIP: "192.168.1.1"}, Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
}, },
{ // headless { // headless
Spec: api.ServiceSpec{PortalIP: "None"}, Spec: api.ServiceSpec{ClusterIP: "None"},
}, },
}, },
} }
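The in-range, out-of-range, empty, duplicate, and headless cases above all funnel through api.IsServiceIPSet; a hedged sketch of the behavior the test assumes (not the verbatim helper):

// Assumed shape: a cluster IP counts as "set" only if it is non-empty and
// not the headless marker "None"; only set IPs participate in repair.
func isServiceIPSet(svc *api.Service) bool {
	return svc.Spec.ClusterIP != "" && svc.Spec.ClusterIP != "None"
}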

@@ -46,19 +46,19 @@ type REST struct {
registry Registry registry Registry
machines minion.Registry machines minion.Registry
endpoints endpoint.Registry endpoints endpoint.Registry
portals ipallocator.Interface serviceIPs ipallocator.Interface
serviceNodePorts portallocator.Interface serviceNodePorts portallocator.Interface
clusterName string clusterName string
} }
// NewStorage returns a new REST. // NewStorage returns a new REST.
func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, portals ipallocator.Interface, func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, serviceIPs ipallocator.Interface,
serviceNodePorts portallocator.Interface, clusterName string) *REST { serviceNodePorts portallocator.Interface, clusterName string) *REST {
return &REST{ return &REST{
registry: registry, registry: registry,
machines: machines, machines: machines,
endpoints: endpoints, endpoints: endpoints,
portals: portals, serviceIPs: serviceIPs,
serviceNodePorts: serviceNodePorts, serviceNodePorts: serviceNodePorts,
clusterName: clusterName, clusterName: clusterName,
} }
@@ -75,7 +75,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
defer func() { defer func() {
if releaseServiceIP { if releaseServiceIP {
if api.IsServiceIPSet(service) { if api.IsServiceIPSet(service) {
rs.portals.Release(net.ParseIP(service.Spec.PortalIP)) rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
} }
} }
}() }()
@@ -85,17 +85,17 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
if api.IsServiceIPRequested(service) { if api.IsServiceIPRequested(service) {
// Allocate next available. // Allocate next available.
ip, err := rs.portals.AllocateNext() ip, err := rs.serviceIPs.AllocateNext()
if err != nil { if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())} el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
return nil, errors.NewInvalid("Service", service.Name, el) return nil, errors.NewInvalid("Service", service.Name, el)
} }
service.Spec.PortalIP = ip.String() service.Spec.ClusterIP = ip.String()
releaseServiceIP = true releaseServiceIP = true
} else if api.IsServiceIPSet(service) { } else if api.IsServiceIPSet(service) {
// Try to respect the requested IP. // Try to respect the requested IP.
if err := rs.portals.Allocate(net.ParseIP(service.Spec.PortalIP)); err != nil { if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())} el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
return nil, errors.NewInvalid("Service", service.Name, el) return nil, errors.NewInvalid("Service", service.Name, el)
} }
releaseServiceIP = true releaseServiceIP = true
@@ -150,7 +150,7 @@ func (rs *REST) Delete(ctx api.Context, id string) (runtime.Object, error) {
} }
if api.IsServiceIPSet(service) { if api.IsServiceIPSet(service) {
rs.portals.Release(net.ParseIP(service.Spec.PortalIP)) rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
} }
for _, nodePort := range CollectServiceNodePorts(service) { for _, nodePort := range CollectServiceNodePorts(service) {
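As a caller-side sketch (assumed usage, not part of this diff), requesting a fixed cluster IP exercises the Allocate branch of Create shown above:

// Hypothetical caller: the requested IP must be free and inside the CIDR,
// otherwise Create returns a field error on spec.clusterIP.
svc := &api.Service{
	ObjectMeta: api.ObjectMeta{Name: "db"},
	Spec: api.ServiceSpec{
		ClusterIP:       "10.0.0.10",
		SessionAffinity: api.ServiceAffinityNone,
		Type:            api.ServiceTypeClusterIP,
		Ports:           []api.ServicePort{{Port: 5432, Protocol: "TCP"}},
	},
}
obj, err := storage.Create(ctx, svc)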

@@ -96,8 +96,8 @@ func TestServiceRegistryCreate(t *testing.T) {
if created_service.CreationTimestamp.IsZero() { if created_service.CreationTimestamp.IsZero() {
t.Errorf("Expected timestamp to be set, got: %v", created_service.CreationTimestamp) t.Errorf("Expected timestamp to be set, got: %v", created_service.CreationTimestamp)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
} }
srv, err := registry.GetService(ctx, svc.Name) srv, err := registry.GetService(ctx, svc.Name)
if err != nil { if err != nil {
@@ -517,8 +517,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if created_service_1.Name != "foo" { if created_service_1.Name != "foo" {
t.Errorf("Expected foo, but got %v", created_service_1.Name) t.Errorf("Expected foo, but got %v", created_service_1.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP)
} }
svc2 := &api.Service{ svc2 := &api.Service{
@@ -538,14 +538,14 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if created_service_2.Name != "bar" { if created_service_2.Name != "bar" {
t.Errorf("Expected bar, but got %v", created_service_2.Name) t.Errorf("Expected bar, but got %v", created_service_2.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP)
} }
testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"} testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"}
testIP := "" testIP := ""
for _, ip := range testIPs { for _, ip := range testIPs {
if !rest.portals.(*ipallocator.Range).Has(net.ParseIP(ip)) { if !rest.serviceIPs.(*ipallocator.Range).Has(net.ParseIP(ip)) {
testIP = ip testIP = ip
} }
} }
@@ -554,7 +554,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "quux"}, ObjectMeta: api.ObjectMeta{Name: "quux"},
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: testIP, ClusterIP: testIP,
SessionAffinity: api.ServiceAffinityNone, SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP, Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{ Ports: []api.ServicePort{{
@@ -569,8 +569,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
created_service_3 := created_svc3.(*api.Service) created_service_3 := created_svc3.(*api.Service)
if created_service_3.Spec.PortalIP != testIP { // specific IP if created_service_3.Spec.ClusterIP != testIP { // specific IP
t.Errorf("Unexpected PortalIP: %s", created_service_3.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service_3.Spec.ClusterIP)
} }
} }
@@ -595,8 +595,8 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if created_service_1.Name != "foo" { if created_service_1.Name != "foo" {
t.Errorf("Expected foo, but got %v", created_service_1.Name) t.Errorf("Expected foo, but got %v", created_service_1.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP)
} }
_, err := rest.Delete(ctx, created_service_1.Name) _, err := rest.Delete(ctx, created_service_1.Name)
@@ -622,8 +622,8 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if created_service_2.Name != "bar" { if created_service_2.Name != "bar" {
t.Errorf("Expected bar, but got %v", created_service_2.Name) t.Errorf("Expected bar, but got %v", created_service_2.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP)
} }
} }
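The reallocation test depends on released IPs returning to the pool; a small sketch of the allocator semantics it assumes (the constructor name here is an assumption, not confirmed by this diff):

// Assumed semantics: after Release, the same IP can be handed out again.
r := ipallocator.NewCIDRRange(serviceNet)
ip, _ := r.AllocateNext()
_ = r.Release(ip)
err := r.Allocate(ip) // expected to succeed once ip is free again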
@@ -648,8 +648,8 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
if created_service.Spec.Ports[0].Port != 6502 { if created_service.Spec.Ports[0].Port != 6502 {
t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port) t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
} }
update := deepCloneService(created_service) update := deepCloneService(created_service)
@@ -663,7 +663,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
update = deepCloneService(created_service) update = deepCloneService(created_service)
update.Spec.Ports[0].Port = 6503 update.Spec.Ports[0].Port = 6503
update.Spec.PortalIP = "1.2.3.76" // error update.Spec.ClusterIP = "1.2.3.76" // error
_, _, err := rest.Update(ctx, update) _, _, err := rest.Update(ctx, update)
if err == nil || !errors.IsInvalid(err) { if err == nil || !errors.IsInvalid(err) {
@@ -692,8 +692,8 @@ func TestServiceRegistryIPLoadBalancer(t *testing.T) {
if created_service.Spec.Ports[0].Port != 6502 { if created_service.Spec.Ports[0].Port != 6502 {
t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port) t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port)
} }
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
} }
update := deepCloneService(created_service) update := deepCloneService(created_service)
@@ -750,7 +750,7 @@ func TestCreate(t *testing.T) {
&api.Service{ &api.Service{
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "None", ClusterIP: "None",
SessionAffinity: "None", SessionAffinity: "None",
Type: api.ServiceTypeClusterIP, Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{ Ports: []api.ServicePort{{
@@ -767,7 +767,7 @@ func TestCreate(t *testing.T) {
&api.Service{ &api.Service{
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"}, Selector: map[string]string{"bar": "baz"},
PortalIP: "invalid", ClusterIP: "invalid",
SessionAffinity: "None", SessionAffinity: "None",
Type: api.ServiceTypeClusterIP, Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{ Ports: []api.ServicePort{{

@@ -428,7 +428,7 @@ func chooseHostInterfaceNativeGo() (net.IP, error) {
if ip == nil { if ip == nil {
return nil, fmt.Errorf("no acceptable interface from host") return nil, fmt.Errorf("no acceptable interface from host")
} }
glog.V(4).Infof("Choosing interface %s for from-host portals", intfs[i].Name) glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip)
return ip, nil return ip, nil
} }
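A hedged caller-side sketch for the helper above (the exported wrapper name is an assumption):

// Assumed usage: pick a default host IP for components that need one.
hostIP, err := util.ChooseHostInterface()
if err != nil {
	glog.Fatalf("unable to pick a host interface: %v", err)
}
glog.Infof("using host IP %v", hostIP)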

@@ -209,7 +209,7 @@ var _ = Describe("DNS", func() {
Name: testServiceName, Name: testServiceName,
}, },
Spec: api.ServiceSpec{ Spec: api.ServiceSpec{
PortalIP: "None", ClusterIP: "None",
Ports: []api.ServicePort{ Ports: []api.ServicePort{
{Port: 80}, {Port: 80},
}, },
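For context on the headless case under test: with ClusterIP set to "None", cluster DNS is expected to return the endpoints' A records directly rather than one virtual IP. A hedged sketch of what a lookup might observe (the service name and DNS suffix are cluster-specific and assumed here):

// Hypothetical in-cluster lookup: a headless service may resolve to
// several addresses, one per ready endpoint.
addrs, err := net.LookupHost("test-service.e2e-tests.cluster.local")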