Rename 'portal IP' to 'cluster IP' most everywhere

This covers obvious transforms, but not --portal_net, $PORTAL_NET and
similar.
Tim Hockin 2015-05-23 13:41:11 -07:00
parent 46686616d4
commit 4318ca5a8b
43 changed files with 389 additions and 326 deletions


@ -213,7 +213,7 @@ func (ks *kube2sky) handleEndpointAdd(obj interface{}) {
func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error {
for i := range service.Spec.Ports {
b, err := json.Marshal(getSkyMsg(service.Spec.PortalIP, service.Spec.Ports[i].Port))
b, err := json.Marshal(getSkyMsg(service.Spec.ClusterIP, service.Spec.Ports[i].Port))
if err != nil {
return err
}
@ -229,7 +229,7 @@ func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
if len(service.Spec.Ports) == 0 {
glog.Fatalf("unexpected service with no ports: %v", service)
}
// if PortalIP is not set, a DNS entry should not be created
// if ClusterIP is not set, a DNS entry should not be created
if !kapi.IsServiceIPSet(service) {
return ks.newHeadlessService(subdomain, service)
}


@ -94,7 +94,7 @@ type hostPort struct {
func getHostPort(service *kapi.Service) *hostPort {
return &hostPort{
Host: service.Spec.PortalIP,
Host: service.Spec.ClusterIP,
Port: service.Spec.Ports[0].Port,
}
}
@ -134,7 +134,7 @@ func TestHeadlessService(t *testing.T) {
Namespace: testNamespace,
},
Spec: kapi.ServiceSpec{
PortalIP: "None",
ClusterIP: "None",
Ports: []kapi.ServicePort{
{Port: 80},
},
@ -187,7 +187,7 @@ func TestHeadlessServiceEndpointsUpdate(t *testing.T) {
Namespace: testNamespace,
},
Spec: kapi.ServiceSpec{
PortalIP: "None",
ClusterIP: "None",
Ports: []kapi.ServicePort{
{Port: 80},
},
@ -244,7 +244,7 @@ func TestHeadlessServiceWithDelayedEndpointsAddition(t *testing.T) {
Namespace: testNamespace,
},
Spec: kapi.ServiceSpec{
PortalIP: "None",
ClusterIP: "None",
Ports: []kapi.ServicePort{
{Port: 80},
},
@ -308,7 +308,7 @@ func TestAddSinglePortService(t *testing.T) {
Port: 80,
},
},
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
},
}
k2s.newService(&service)
@ -334,12 +334,12 @@ func TestUpdateSinglePortService(t *testing.T) {
Port: 80,
},
},
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
},
}
k2s.newService(&service)
assert.Len(t, ec.writes, 2)
service.Spec.PortalIP = "0.0.0.0"
service.Spec.ClusterIP = "0.0.0.0"
k2s.newService(&service)
expectedValue := getHostPort(&service)
assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue)
@ -363,7 +363,7 @@ func TestDeleteSinglePortService(t *testing.T) {
Port: 80,
},
},
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
},
}
// Add the service


@ -23,7 +23,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
export roles=("ai" "i" "i")
# Define minion numbers
export NUM_MINIONS=${NUM_MINIONS:-3}
# define the IP range used for service portal.
# define the IP range used for service cluster IPs.
# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
export PORTAL_NET=192.168.3.0/24
# define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range


@ -85,8 +85,8 @@ type APIServer struct {
OldEtcdPathPrefix string
CorsAllowedOriginList util.StringList
AllowPrivileged bool
PortalNet util.IPNet // TODO: make this a list
ServiceNodePorts util.PortRange
ServiceClusterIPRange util.IPNet // TODO: make this a list
ServiceNodePortRange util.PortRange
EnableLogsSupport bool
MasterServiceNamespace string
RuntimeConfig util.ConfigurationMap
@ -183,8 +183,9 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.OldEtcdPathPrefix, "old-etcd-prefix", s.OldEtcdPathPrefix, "The previous prefix for all resource paths in etcd, if any.")
fs.Var(&s.CorsAllowedOriginList, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.")
fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.")
fs.Var(&s.PortalNet, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
fs.Var(&s.ServiceNodePorts, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.")
fs.Var(&s.ServiceClusterIPRange, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
fs.Var(&s.ServiceNodePortRange, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.")
fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods")
fs.Var(&s.RuntimeConfig, "runtime-config", "A set of key=value pairs that describe runtime configuration that may be passed to the apiserver. api/<version> key can be used to turn on/off specific api versions. api/all and api/legacy are special keys to control all and legacy api versions respectively.")
client.BindKubeletClientConfigFlags(fs, &s.KubeletConfig)
@ -196,9 +197,9 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
}
// TODO: Longer term we should read this from some config store, rather than a flag.
func (s *APIServer) verifyPortalFlags() {
if s.PortalNet.IP == nil {
glog.Fatal("No --portal-net specified")
func (s *APIServer) verifyClusterIPFlags() {
if s.ServiceClusterIPRange.IP == nil {
glog.Fatal("No --service-cluster-ip-range specified")
}
}
@ -227,7 +228,7 @@ func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersi
// Run runs the specified APIServer. This should never exit.
func (s *APIServer) Run(_ []string) error {
s.verifyPortalFlags()
s.verifyClusterIPFlags()
if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
glog.Fatalf("specify either --etcd-servers or --etcd-config")
@ -302,7 +303,7 @@ func (s *APIServer) Run(_ []string) error {
}
}
n := net.IPNet(s.PortalNet)
n := net.IPNet(s.ServiceClusterIPRange)
// Default to the private server key for service account token signing
if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
@ -349,7 +350,7 @@ func (s *APIServer) Run(_ []string) error {
EtcdHelper: helper,
EventTTL: s.EventTTL,
KubeletClient: kubeletClient,
PortalNet: &n,
ServiceClusterIPRange: &n,
EnableCoreControllers: true,
EnableLogsSupport: s.EnableLogsSupport,
EnableUISupport: true,


@ -83,7 +83,7 @@ We want to be able to assign IP addresses externally from Docker ([Docker issue
In addition to enabling self-registration with 3rd-party discovery mechanisms, we'd like to setup DDNS automatically ([Issue #146](https://github.com/GoogleCloudPlatform/kubernetes/issues/146)). hostname, $HOSTNAME, etc. should return a name for the pod ([Issue #298](https://github.com/GoogleCloudPlatform/kubernetes/issues/298)), and gethostbyname should be able to resolve names of other pods. Probably we need to set up a DNS resolver to do the latter ([Docker issue #2267](https://github.com/dotcloud/docker/issues/2267)), so that we don't need to keep /etc/hosts files up to date dynamically.
[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service portal IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service portal IP in DNS, and for that to become the preferred resolution protocol.
[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service's IP in DNS, and for that to become the preferred resolution protocol.
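For illustration, a minimal sketch of what those variables look like inside a pod, assuming a hypothetical service named `redis-master` that has been allocated service IP 10.0.0.11 and exposes TCP port 6379:

```bash
# Kubernetes-specific variables for the hypothetical "redis-master" service:
REDIS_MASTER_SERVICE_HOST=10.0.0.11
REDIS_MASTER_SERVICE_PORT=6379
# Docker-links-compatible variables for the same service:
REDIS_MASTER_PORT=tcp://10.0.0.11:6379
REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379
REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_PORT=6379
REDIS_MASTER_PORT_6379_TCP_ADDR=10.0.0.11
```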
We'd also like to accommodate other load-balancing solutions (e.g., HAProxy), non-load-balanced services ([Issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260)), and other types of groups (worker pools, etc.). Providing the ability to Watch a label selector applied to pod addresses would enable efficient monitoring of group membership, which could be directly consumed or synced with a discovery mechanism. Event hooks ([Issue #140](https://github.com/GoogleCloudPlatform/kubernetes/issues/140)) for join/leave events would probably make this even easier.


@ -87,7 +87,7 @@ Some firewall software that uses iptables may not interact well with
kubernetes. If you're having trouble around networking, try disabling any
firewall or other iptables-using systems, first.
By default the IP range for service portals is 10.0.*.* - depending on your
By default the IP range for service cluster IPs is 10.0.*.* - depending on your
docker installation, this may conflict with IPs for containers. If you find
containers running with IPs in this range, edit hack/local-cluster-up.sh and
change the portal_net flag to something else.


@ -235,7 +235,7 @@ $ mesos ps
```
The number of Kubernetes pods listed earlier (from `bin/kubectl get pods`) should equal the number of active Mesos tasks listed in the previous listing (`mesos ps`).
Next, determine the internal IP address of the front end [service portal][8]:
Next, determine the internal IP address of the front end [service][8]:
```bash
$ bin/kubectl get services
@ -268,14 +268,14 @@ Or interact with the frontend application via your browser, in 2 steps:
First, open the firewall on the master machine.
```bash
# determine the internal port for the frontend service portal
# determine the internal port for the frontend service
$ sudo iptables-save|grep -e frontend # -- port 36336 in this case
-A KUBE-PORTALS-CONTAINER -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
-A KUBE-PORTALS-CONTAINER -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
-A KUBE-PORTALS-HOST -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
-A KUBE-PORTALS-HOST -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336
# open up access to the internal port for the frontend service portal
# open up access to the internal port for the frontend service
$ sudo iptables -A INPUT -i eth0 -p tcp -m state --state NEW,ESTABLISHED -m tcp \
--dport ${internal_frontend_service_port} -j ACCEPT
```
@ -297,7 +297,7 @@ Now, you can visit the guestbook in your browser!
[5]: https://google.mesosphere.com
[6]: http://mesosphere.com/docs/getting-started/cloud/google/mesosphere/#vpn-setup
[7]: https://github.com/mesosphere/kubernetes-mesos/tree/v0.4.0/examples/guestbook
[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-portals
[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-vips
[9]: mesos/k8s-firewall.png
[10]: mesos/k8s-guestbook.png
[11]: http://mesos.apache.org/


@ -135,7 +135,7 @@ The kube-apiserver takes several options.
DEPRECATED: see --insecure-port instead
**--portal-net**=<nil>
A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.
A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
**--profiling**=true
Enable profiling via web interface host:port/debug/pprof/
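To connect the `--portal-net` entry above to practice, a hedged invocation sketch; 10.0.0.0/24 matches the default the master falls back to when the flag is unset, and the remaining flags are elided:

```bash
# Assign service cluster IPs from 10.0.0.0/24; must not overlap any pod IP range.
kube-apiserver --portal-net=10.0.0.0/24 ...
```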


@ -179,7 +179,7 @@ The kube\-apiserver takes several options.
.PP
\fB\-\-portal\-net\fP=
A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.
A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
.PP
\fB\-\-profiling\fP=true


@ -42,7 +42,7 @@ applications will expose one or more network endpoints for clients to connect to
balanced or situated behind a proxy - the data from those proxies and load balancers can be used to estimate client to
server traffic for applications. This is the primary, but not sole, source of data for making decisions.
Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-portals)
Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-vips)
running on each node directs service requests to the underlying implementation.
While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage


@ -20,6 +20,58 @@ clustered database or key-value store. We will target such workloads for our
## v1 APIs
For existing and future workloads, we want to provide a consistent, stable set of APIs, over which developers can build and extend Kubernetes. This includes input validation, a consistent API structure, clean semantics, and improved diagnosability of the system.
||||||| merged common ancestors
## APIs and core features
1. Consistent v1 API
- Status: DONE. [v1beta3](http://kubernetesio.blogspot.com/2015/04/introducing-kubernetes-v1beta3.html) was developed as the release candidate for the v1 API.
2. Multi-port services for apps which need more than one port on the same portal IP ([#1802](https://github.com/GoogleCloudPlatform/kubernetes/issues/1802))
- Status: DONE. Released in 0.15.0
3. Nominal services for applications which need one stable IP per pod instance ([#260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260))
- Status: #2585 covers some design options.
4. API input is scrubbed of status fields in favor of a new API to set status ([#4248](https://github.com/GoogleCloudPlatform/kubernetes/issues/4248))
- Status: DONE
5. Input validation reporting versioned field names ([#3084](https://github.com/GoogleCloudPlatform/kubernetes/issues/3084))
- Status: in progress
6. Error reporting: Report common problems in ways that users can discover
- Status:
7. Event management: Make events usable and useful
- Status:
8. Persistent storage support ([#5105](https://github.com/GoogleCloudPlatform/kubernetes/issues/5105))
- Status: in progress
9. Allow nodes to join/leave a cluster ([#6087](https://github.com/GoogleCloudPlatform/kubernetes/issues/6087),[#3168](https://github.com/GoogleCloudPlatform/kubernetes/issues/3168))
- Status: in progress ([#6949](https://github.com/GoogleCloudPlatform/kubernetes/pull/6949))
10. Handle node death
- Status: mostly covered by nodes joining/leaving a cluster
11. Allow live cluster upgrades ([#6075](https://github.com/GoogleCloudPlatform/kubernetes/issues/6075),[#6079](https://github.com/GoogleCloudPlatform/kubernetes/issues/6079))
- Status: design in progress
12. Allow kernel upgrades
- Status: mostly covered by nodes joining/leaving a cluster, need demonstration
13. Allow rolling-updates to fail gracefully ([#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353))
- Status:
14. Easy .dockercfg
- Status:
15. Demonstrate cluster stability over time
- Status
16. Kubelet use the kubernetes API to fetch jobs to run (instead of etcd) on supported platforms
- Status: DONE
## Reliability and performance
1. Restart system components in case of crash (#2884)
- Status: in progress
2. Scale to 100 nodes (#3876)
- Status: in progress
3. Scale to 30-50 pods (1-2 containers each) per node (#4188)
- Status:
4. Scheduling throughput: 99% of scheduling decisions made in less than 1s on 100 node, 3000 pod cluster; linear time to number of nodes and pods (#3954)
5. Startup time: 99% of end-to-end pod startup time with prepulled images is less than 5s on 100 node, 3000 pod cluster; linear time to number of nodes and pods (#3952, #3954)
- Status:
6. API performance: 99% of API calls return in less than 1s; constant time to number of nodes and pods (#4521)
- Status:
7. Manage and report disk space on nodes (#4135)
- Status: in progress
8. API test coverage more than 85% in e2e tests
- Status:
In addition, we will provide versioning and deprecation policies for the APIs.


@ -31,7 +31,7 @@ that is updated whenever the set of `Pods` in a `Service` changes. For
non-native applications, Kubernetes offers a virtual-IP-based bridge to Services
which redirects to the backend `Pods`.
## Defining a Service
## Defining a service
A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the
REST objects, a `Service` definition can be POSTed to the apiserver to create a
@ -138,7 +138,7 @@ Accessing a `Service` without a selector works the same as if it had a selector.
The traffic will be routed to endpoints defined by the user (`1.2.3.4:80` in
this example).
## Portals and service proxies
## Virtual IPs and service proxies
Every node in a Kubernetes cluster runs a `kube-proxy`. This application
watches the Kubernetes master for the addition and removal of `Service`
@ -199,20 +199,22 @@ disambiguated. For example:
}
```
## Choosing your own PortalIP address
## Choosing your own IP address
A user can specify their own `PortalIP` address as part of a `Service` creation
request. For example, if they already have an existing DNS entry that they
wish to replace, or legacy systems that are configured for a specific IP
address and difficult to re-configure. The `PortalIP` address that a user
A user can specify their own cluster IP address as part of a `Service` creation
request. To do this, set the `spec.clusterIP` field (called `portalIP` in
v1beta3 and earlier APIs). For example, if they already have an existing DNS
entry that they wish to replace, or legacy systems that are configured for a
specific IP address and difficult to re-configure. The IP address that a user
chooses must be a valid IP address and within the portal_net CIDR range that is
specified by flag to the API server. If the PortalIP value is invalid, the
specified by flag to the API server. If the IP address value is invalid, the
apiserver returns a 422 HTTP status code to indicate that the value is invalid.
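As a rough sketch of such a request (all names and addresses here are made up; in the v1beta3 and earlier APIs the field is spelled `portalIP` rather than `clusterIP`):

```json
{
    "kind": "Service",
    "apiVersion": "v1",
    "metadata": {
        "name": "my-service"
    },
    "spec": {
        "selector": {
            "app": "MyApp"
        },
        "ports": [
            {
                "protocol": "TCP",
                "port": 80,
                "targetPort": 9376
            }
        ],
        "clusterIP": "10.0.171.239"
    }
}
```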
### Why not use round-robin DNS?
A question that pops up every now and then is why we do all this stuff with
portals rather than just use standard round-robin DNS. There are a few reasons:
virtual IPs rather than just use standard round-robin DNS. There are a few
reasons:
* There is a long history of DNS libraries not respecting DNS TTLs and
caching the results of name lookups.
@ -221,7 +223,7 @@ portals rather than just use standard round-robin DNS. There are a few reasons:
client re-resolving DNS over and over would be difficult to manage.
We try to discourage users from doing things that hurt themselves. That said,
if enough people ask for this, we may implement it as an alternative to portals.
if enough people ask for this, we may implement it as an alternative.
## Discovering services
@ -238,7 +240,7 @@ and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables,
where the Service name is upper-cased and dashes are converted to underscores.
For example, the Service "redis-master" which exposes TCP port 6379 and has been
allocated portal IP address 10.0.0.11 produces the following environment
allocated cluster IP address 10.0.0.11 produces the following environment
variables:
```
@ -272,24 +274,25 @@ cluster IP.
We will soon add DNS support for multi-port `Service`s in the form of SRV
records.
## Headless Services
## Headless services
Sometimes you don't need or want a single service IP. In this case, you can
create "headless" services by specifying `"None"` for the `PortalIP`. For such
`Service`s, a cluster IP is not allocated and service-specific environment
variables for `Pod`s are not created. DNS is configured to return multiple A
records (addresses) for the `Service` name, which point directly to the `Pod`s
backing the `Service`. Additionally, the kube proxy does not handle these
services and there is no load balancing or proxying done by the platform for
them. The endpoints controller will still create `Endpoints` records in the
API.
Sometimes you don't need or want load-balancing and a single service IP. In
this case, you can create "headless" services by specifying `"None"` for the
cluster IP (`spec.clusterIP` or `spec.portalIP` in v1beta3 and earlier APIs).
For such `Service`s, a cluster IP is not allocated and service-specific
environment variables for `Pod`s are not created. DNS is configured to return
multiple A records (addresses) for the `Service` name, which point directly to
the `Pod`s backing the `Service`. Additionally, the kube proxy does not handle
these services and there is no load balancing or proxying done by the platform
for them. The endpoints controller will still create `Endpoints` records in
the API.
This option allows developers to reduce coupling to the Kubernetes system, if
they desire, but leaves them freedom to do discovery in their own way.
Applications can still use a self-registration pattern and adapters for other
discovery systems could easily be built upon this API.
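A minimal sketch of such a headless service (names are illustrative; v1beta3 and earlier would set `portalIP` instead):

```json
{
    "kind": "Service",
    "apiVersion": "v1",
    "metadata": {
        "name": "my-headless-service"
    },
    "spec": {
        "clusterIP": "None",
        "selector": {
            "app": "MyApp"
        },
        "ports": [
            {
                "protocol": "TCP",
                "port": 80
            }
        ]
    }
}
```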
## External Services
## External services
For some parts of your application (e.g. frontends) you may want to expose a
Service onto an external (outside of your cluster, maybe public internet) IP
@ -366,7 +369,7 @@ though exactly how that works depends on the cloud provider.
## Shortcomings
We expect that using iptables and userspace proxies for portals will work at
We expect that using iptables and userspace proxies for VIPs will work at
small to medium scale, but may not scale to very large clusters with thousands
of Services. See [the original design proposal for
portals](https://github.com/GoogleCloudPlatform/kubernetes/issues/1107) for more
@ -387,7 +390,7 @@ but the current API requires it.
In the future we envision that the proxy policy can become more nuanced than
simple round robin balancing, for example master elected or sharded. We also
envision that some `Services` will have "real" load balancers, in which case the
portal will simply transport the packets there.
VIP will simply transport the packets there.
There's a
[proposal](https://github.com/GoogleCloudPlatform/kubernetes/issues/3760) to
@ -400,7 +403,7 @@ We intend to have first-class support for L7 (HTTP) `Service`s.
We intend to have more flexible ingress modes for `Service`s which encompass
the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more.
## The gory details of portals
## The gory details of virtual IPs
The previous information should be sufficient for many people who just want to
use `Services`. However, there is a lot going on behind the scenes that may be
@ -427,26 +430,25 @@ of Kubernetes that used in memory locking) as well as checking for invalid
assignments due to administrator intervention and cleaning up any IPs
that were allocated but which no service currently uses.
### IPs and Portals
### IPs and VIPs
Unlike `Pod` IP addresses, which actually route to a fixed destination,
`Service` IPs are not actually answered by a single host. Instead, we use
`iptables` (packet processing logic in Linux) to define virtual IP addresses
which are transparently redirected as needed. We call the tuple of the
`Service` IP and the `Service` port the `portal`. When clients connect to the
`portal`, their traffic is automatically transported to an appropriate
endpoint. The environment variables and DNS for `Services` are actually
populated in terms of the portal IP and port.
which are transparently redirected as needed. When clients connect to the
VIP, their traffic is automatically transported to an appropriate endpoint.
The environment variables and DNS for `Services` are actually populated in
terms of the `Service`'s VIP and port.
As an example, consider the image processing application described above.
When the backend `Service` is created, the Kubernetes master assigns a portal
When the backend `Service` is created, the Kubernetes master assigns a virtual
IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the
portal is 10.0.0.1:1234. The master stores that information, which is then
observed by all of the `kube-proxy` instances in the cluster. When a proxy
sees a new portal, it opens a new random port, establishes an iptables redirect
from the portal to this new port, and starts accepting connections on it.
`Service` is observed by all of the `kube-proxy` instances in the cluster.
When a proxy sees a new `Service`, it opens a new random port, establishes an
iptables redirect from the VIP to this new port, and starts accepting
connections on it.
When a client connects to the portal the iptables rule kicks in, and redirects
When a client connects to the VIP the iptables rule kicks in, and redirects
the packets to the `Service proxy`'s own port. The `Service proxy` chooses a
backend, and starts proxying traffic from the client to the backend.
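A hedged sketch of the resulting rules, using the 10.0.0.1:1234 example above; the node address and the proxy's random local port are made up, and the real rules live in the KUBE-PORTALS-CONTAINER and KUBE-PORTALS-HOST chains shown in the guestbook walkthrough:

```bash
# Illustrative iptables-save output: VIP 10.0.0.1:1234 DNATed to the proxy's
# local port (45678 here) on the node (10.240.1.5 here).
-A KUBE-PORTALS-CONTAINER -d 10.0.0.1/32 -p tcp -m tcp --dport 1234 -j DNAT --to-destination 10.240.1.5:45678
-A KUBE-PORTALS-HOST -d 10.0.0.1/32 -p tcp -m tcp --dport 1234 -j DNAT --to-destination 10.240.1.5:45678
```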


@ -136,7 +136,7 @@ _sticky sessions_. With Kubernetes you can scale out your app easily
with session affinity. The [`meteor-service.json`](meteor-service.json) file contains
`"sessionAffinity": "ClientIP"`, which provides this for us. See the
[service
documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#portals-and-service-proxies)
documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#virtual-ips-and-service-proxies)
for more information.
As mentioned above, the mongo container uses a volume which is mapped


@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that services and portals work.
# Verifies that services and virtual IPs work.
set -o errexit
set -o nounset
@ -285,10 +285,10 @@ function verify_from_container() {
fi
done
'")) \
|| error "testing $1 portal from container failed"
|| error "testing $1 VIP from container failed"
found_pods=$(sort_args "${results[@]}")
if [[ "${found_pods}" != "$5" ]]; then
error -e "$1 portal failed from container, expected:\n
error -e "$1 VIP failed from container, expected:\n
$(printf '\t%s\n' $5)\n
got:\n
$(printf '\t%s\n' ${found_pods})
@ -323,20 +323,20 @@ wait_for_pods "${svc2_name}" "${svc2_count}"
svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}")
svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}")
# Get the portal IPs.
# Get the VIP IPs.
svc1_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc1_name}" --api-version=v1beta3)
test -n "${svc1_ip}" || error "Service1 IP is blank"
svc2_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc2_name}" --api-version=v1beta3)
test -n "${svc2_ip}" || error "Service2 IP is blank"
if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then
error "Portal IPs conflict: ${svc1_ip}"
error "VIPs conflict: ${svc1_ip}"
fi
#
# Test 1: Prove that the service portal is alive.
# Test 1: Prove that the service VIP is alive.
#
echo "Test 1: Prove that the service portal is alive."
echo "Verifying the portals from the host"
echo "Test 1: Prove that the service VIP is alive."
echo "Verifying the VIP from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
for ip in ${svc1_publics}; do
@ -345,7 +345,7 @@ for ip in ${svc1_publics}; do
done
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIP from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
for ip in ${svc1_publics}; do
@ -356,17 +356,17 @@ verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
#
# Test 2: Bounce the proxy and make sure the portal comes back.
# Test 2: Bounce the proxy and make sure the VIP comes back.
#
echo "Test 2: Bounce the proxy and make sure the portal comes back."
echo "Test 2: Bounce the proxy and make sure the VIP comes back."
echo "Restarting kube-proxy"
restart-kube-proxy "${test_node}"
echo "Verifying the portals from the host"
echo "Verifying the VIP from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIP from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
@ -395,14 +395,14 @@ wait_for_pods "${svc3_name}" "${svc3_count}"
# Get the sorted lists of pods.
svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}")
# Get the portal IP.
# Get the VIP.
svc3_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc3_name}" --api-version=v1beta3)
test -n "${svc3_ip}" || error "Service3 IP is blank"
echo "Verifying the portals from the host"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
@ -415,31 +415,31 @@ echo "Manually removing iptables rules"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST || true"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PROXY || true"
echo "Verifying the portals from the host"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
#
# Test 6: Restart the master, make sure portals come back.
# Test 6: Restart the master, make sure VIPs come back.
#
echo "Test 6: Restart the master, make sure portals come back."
echo "Test 6: Restart the master, make sure VIPs come back."
echo "Restarting the master"
restart-apiserver "${master}"
sleep 5
echo "Verifying the portals from the host"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
#
# Test 7: Bring up another service, make sure it does not re-use Portal IPs.
# Test 7: Bring up another service, make sure it does not re-use IPs.
#
echo "Test 7: Bring up another service, make sure it does not re-use Portal IPs."
echo "Test 7: Bring up another service, make sure it does not re-use IPs."
svc4_name="service4"
svc4_port=80
svc4_count=3
@ -451,17 +451,17 @@ wait_for_pods "${svc4_name}" "${svc4_count}"
# Get the sorted lists of pods.
svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}")
# Get the portal IP.
# Get the VIP.
svc4_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc4_name}" --api-version=v1beta3)
test -n "${svc4_ip}" || error "Service4 IP is blank"
if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then
error "Portal IPs conflict: ${svc4_ip}"
error "VIPs conflict: ${svc4_ip}"
fi
echo "Verifying the portals from the host"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
"${svc4_count}" "${svc4_pods}"
echo "Verifying the portals from a container"
echo "Verifying the VIPs from a container"
verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
"${svc4_count}" "${svc4_pods}"


@ -18,6 +18,8 @@ package api
// AUTO-GENERATED FUNCTIONS START HERE
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
@ -25,7 +27,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"speter.net/go/exp/math/dec/inf"
"time"
)
func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
@ -1928,7 +1929,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.ClusterIP = in.ClusterIP
out.Type = in.Type
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))


@ -99,15 +99,15 @@ func NewDeleteOptions(grace int64) *DeleteOptions {
return &DeleteOptions{GracePeriodSeconds: &grace}
}
// this function aims to check if the service portal IP is set or not
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *Service) bool {
return service.Spec.PortalIP != PortalIPNone && service.Spec.PortalIP != ""
return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service portal IP is requested or not
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *Service) bool {
return service.Spec.PortalIP == ""
return service.Spec.ClusterIP == ""
}
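A minimal usage sketch of these helpers (a hypothetical caller, mirroring how the kubelet, kube2sky, and the env-var generator elsewhere in this change skip headless services):

```go
package example

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)

// clusterIPs returns the cluster IPs of all non-headless services in the list.
// Services whose ClusterIP is "None" or unset are skipped: they get no
// proxying, no service environment variables, and no single-VIP DNS record.
func clusterIPs(services *api.ServiceList) []string {
	var ips []string
	for i := range services.Items {
		service := &services.Items[i]
		if !api.IsServiceIPSet(service) {
			continue
		}
		ips = append(ips, service.Spec.ClusterIP)
	}
	return ips
}
```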
var standardFinalizers = util.NewStringSet(


@ -77,10 +77,10 @@ func TestBeforeUpdate(t *testing.T) {
expectErr: true,
},
{
name: "change portal IP",
name: "change ClusterIP",
tweakSvc: func(oldSvc, newSvc *api.Service) {
oldSvc.Spec.PortalIP = "1.2.3.4"
newSvc.Spec.PortalIP = "4.3.2.1"
oldSvc.Spec.ClusterIP = "1.2.3.4"
newSvc.Spec.ClusterIP = "4.3.2.1"
},
expectErr: true,
},


@ -1004,9 +1004,9 @@ type ReplicationControllerList struct {
}
const (
// PortalIPNone - do not assign a portal IP
// ClusterIPNone - do not assign a cluster IP
// no proxying required and no environment variables should be created for pods
PortalIPNone = "None"
ClusterIPNone = "None"
)
// ServiceList holds a list of services.
@ -1033,7 +1033,7 @@ type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
// cluster, via the ClusterIP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
@ -1082,12 +1082,12 @@ type ServiceSpec struct {
// those endpoints.
Selector map[string]string `json:"selector"`
// PortalIP is usually assigned by the master. If specified by the user
// ClusterIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can
// not be changed by updates.
// Valid values are None, empty string (""), or a valid IP address
// None can be specified for headless services when proxying is not required
PortalIP string `json:"portalIP,omitempty"`
ClusterIP string `json:"clusterIP,omitempty"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`


@ -2116,7 +2116,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.ClusterIP = in.ClusterIP
out.Type = ServiceType(in.Type)
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))
@ -4391,7 +4391,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.ClusterIP = in.ClusterIP
out.Type = api.ServiceType(in.Type)
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))


@ -18,13 +18,14 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"speter.net/go/exp/math/dec/inf"
"time"
)
func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
@ -1864,7 +1865,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.ClusterIP = in.ClusterIP
out.Type = in.Type
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))


@ -1015,7 +1015,7 @@ type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
// cluster, via the cluster IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
@ -1062,12 +1062,12 @@ type ServiceSpec struct {
// This service will route traffic to pods having labels matching this selector. If null, no endpoints will be automatically created. If empty, all pods will be selected.
Selector map[string]string `json:"selector,omitempty" description:"label keys and values that must match in order to receive traffic for this service; if empty, all pods are selected, if not specified, endpoints must be manually specified"`
// PortalIP is usually assigned by the master. If specified by the user
// ClusterIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can
// not be changed by updates.
// Valid values are None, empty string (""), or a valid IP address
// None can be specified for headless services when proxying is not required
PortalIP string `json:"portalIP,omitempty description: IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"`
ClusterIP string `json:"clusterIP,omitempty description: IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
@ -1120,9 +1120,9 @@ type Service struct {
}
const (
// PortalIPNone - do not assign a portal IP
// ClusterIPNone - do not assign a cluster IP
// no proxying required and no environment variables should be created for pods
PortalIPNone = "None"
ClusterIPNone = "None"
)
// ServiceList holds a list of services.


@ -782,7 +782,7 @@ func addConversionFuncs() {
return err
}
out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP
out.PortalIP = in.Spec.ClusterIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err
}
@ -834,7 +834,7 @@ func addConversionFuncs() {
return err
}
out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP
out.Spec.ClusterIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err
}


@ -704,7 +704,7 @@ func addConversionFuncs() {
return err
}
out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP
out.PortalIP = in.Spec.ClusterIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err
}
@ -756,7 +756,7 @@ func addConversionFuncs() {
return err
}
out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP
out.Spec.ClusterIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err
}


@ -356,7 +356,7 @@ func convert_v1beta3_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Se
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.ClusterIP = in.PortalIP
typeIn := in.Type
if typeIn == "" {
@ -404,7 +404,7 @@ func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *Se
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.PortalIP = in.ClusterIP
if err := s.Convert(&in.Type, &out.Type, 0); err != nil {
return err


@ -1063,8 +1063,8 @@ func ValidateService(service *api.Service) errs.ValidationErrorList {
}
if api.IsServiceIPSet(service) {
if ip := net.ParseIP(service.Spec.PortalIP); ip == nil {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "portalIP should be empty, 'None', or a valid IP address"))
if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "clusterIP should be empty, 'None', or a valid IP address"))
}
}
@ -1157,10 +1157,8 @@ func ValidateServiceUpdate(oldService, service *api.Service) errs.ValidationErro
allErrs := errs.ValidationErrorList{}
allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldService.ObjectMeta, &service.ObjectMeta).Prefix("metadata")...)
// TODO: PortalIP should be a Status field, since the system can set a value != to the user's value
// once PortalIP is set, it cannot be unset.
if api.IsServiceIPSet(oldService) && service.Spec.PortalIP != oldService.Spec.PortalIP {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "field is immutable"))
if api.IsServiceIPSet(oldService) && service.Spec.ClusterIP != oldService.Spec.ClusterIP {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "field is immutable"))
}
allErrs = append(allErrs, ValidateService(service)...)


@ -1581,9 +1581,9 @@ func TestValidateService(t *testing.T) {
numErrs: 1,
},
{
name: "invalid portal ip",
name: "invalid cluster ip",
tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = "invalid"
s.Spec.ClusterIP = "invalid"
},
numErrs: 1,
},
@ -1676,16 +1676,16 @@ func TestValidateService(t *testing.T) {
numErrs: 0,
},
{
name: "valid portal ip - none ",
name: "valid cluster ip - none ",
tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = "None"
s.Spec.ClusterIP = "None"
},
numErrs: 0,
},
{
name: "valid portal ip - empty",
name: "valid cluster ip - empty",
tweakSvc: func(s *api.Service) {
s.Spec.PortalIP = ""
s.Spec.ClusterIP = ""
s.Spec.Ports[0].TargetPort = util.NewIntOrStringFromString("http")
},
numErrs: 0,
@ -2556,18 +2556,18 @@ func TestValidateServiceUpdate(t *testing.T) {
numErrs: 0,
},
{
name: "change portal IP",
name: "change cluster IP",
tweakSvc: func(oldSvc, newSvc *api.Service) {
oldSvc.Spec.PortalIP = "1.2.3.4"
newSvc.Spec.PortalIP = "8.6.7.5"
oldSvc.Spec.ClusterIP = "1.2.3.4"
newSvc.Spec.ClusterIP = "8.6.7.5"
},
numErrs: 1,
},
{
name: "remove portal IP",
name: "remove cluster IP",
tweakSvc: func(oldSvc, newSvc *api.Service) {
oldSvc.Spec.PortalIP = "1.2.3.4"
newSvc.Spec.PortalIP = ""
oldSvc.Spec.ClusterIP = "1.2.3.4"
newSvc.Spec.ClusterIP = ""
},
numErrs: 1,
},


@ -506,7 +506,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api
fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(service.Labels))
fmt.Fprintf(out, "Selector:\t%s\n", formatLabels(service.Spec.Selector))
fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type)
fmt.Fprintf(out, "IP:\t%s\n", service.Spec.PortalIP)
fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP)
if len(service.Status.LoadBalancer.Ingress) > 0 {
list := buildIngressString(service.Status.LoadBalancer.Ingress)
fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list)


@ -553,7 +553,7 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool) error {
name = svc.Name
}
ips := []string{svc.Spec.PortalIP}
ips := []string{svc.Spec.ClusterIP}
ingress := svc.Status.LoadBalancer.Ingress
for i := range ingress {


@ -645,7 +645,7 @@ func TestPrintHumanReadableService(t *testing.T) {
tests := []api.Service{
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
@ -668,7 +668,7 @@ func TestPrintHumanReadableService(t *testing.T) {
},
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
@ -687,7 +687,7 @@ func TestPrintHumanReadableService(t *testing.T) {
},
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
@ -715,7 +715,7 @@ func TestPrintHumanReadableService(t *testing.T) {
},
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
@ -754,9 +754,9 @@ func TestPrintHumanReadableService(t *testing.T) {
buff := bytes.Buffer{}
printService(&svc, &buff, false)
output := string(buff.Bytes())
ip := svc.Spec.PortalIP
ip := svc.Spec.ClusterIP
if !strings.Contains(output, ip) {
t.Errorf("expected to contain portal ip %s, but doesn't: %s", ip, output)
t.Errorf("expected to contain ClusterIP %s, but doesn't: %s", ip, output)
}
for _, ingress := range svc.Status.LoadBalancer.Ingress {
@ -772,7 +772,7 @@ func TestPrintHumanReadableService(t *testing.T) {
t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output)
}
}
// Max of # ports and (# public ip + portal ip)
// Max of # ports and (# public ip + cluster ip)
count := len(svc.Spec.Ports)
if len(svc.Status.LoadBalancer.Ingress)+1 > count {
count = len(svc.Status.LoadBalancer.Ingress) + 1
@ -932,7 +932,7 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) {
obj: &api.Service{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,


@ -32,16 +32,16 @@ func FromServices(services *api.ServiceList) []api.EnvVar {
for i := range services.Items {
service := &services.Items[i]
// ignore services where PortalIP is "None" or empty
// ignore services where ClusterIP is "None" or empty
// the services passed to this method should be pre-filtered
// only services that have the portal IP set should be included here
// only services that have the cluster IP set should be included here
if !api.IsServiceIPSet(service) {
continue
}
// Host
name := makeEnvVariableName(service.Name) + "_SERVICE_HOST"
result = append(result, api.EnvVar{Name: name, Value: service.Spec.PortalIP})
result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP})
// First port - give it the backwards-compatible name
name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)})
@ -81,14 +81,14 @@ func makeLinkVariables(service *api.Service) []api.EnvVar {
// Docker special-cases the first port.
all = append(all, api.EnvVar{
Name: prefix + "_PORT",
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port),
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
})
}
portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol))
all = append(all, []api.EnvVar{
{
Name: portPrefix,
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port),
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
},
{
Name: portPrefix + "_PROTO",
@ -100,7 +100,7 @@ func makeLinkVariables(service *api.Service) []api.EnvVar {
},
{
Name: portPrefix + "_ADDR",
Value: service.Spec.PortalIP,
Value: service.Spec.ClusterIP,
},
}...)
}


@ -30,8 +30,8 @@ func TestFromServices(t *testing.T) {
{
ObjectMeta: api.ObjectMeta{Name: "foo-bar"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "1.2.3.4",
Selector: map[string]string{"bar": "baz"},
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{Port: 8080, Protocol: "TCP"},
},
@ -40,8 +40,8 @@ func TestFromServices(t *testing.T) {
{
ObjectMeta: api.ObjectMeta{Name: "abc-123"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "5.6.7.8",
Selector: map[string]string{"bar": "baz"},
ClusterIP: "5.6.7.8",
Ports: []api.ServicePort{
{Name: "u-d-p", Port: 8081, Protocol: "UDP"},
{Name: "t-c-p", Port: 8081, Protocol: "TCP"},
@ -51,8 +51,8 @@ func TestFromServices(t *testing.T) {
{
ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "9.8.7.6",
Selector: map[string]string{"bar": "baz"},
ClusterIP: "9.8.7.6",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
{Name: "8083", Port: 8083, Protocol: "TCP"},
@ -60,20 +60,20 @@ func TestFromServices(t *testing.T) {
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-none"},
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "None",
Selector: map[string]string{"bar": "baz"},
ClusterIP: "None",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-empty"},
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "",
Selector: map[string]string{"bar": "baz"},
ClusterIP: "",
Ports: []api.ServicePort{
{Port: 8082, Protocol: "TCP"},
},


@ -885,7 +885,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
// project the services in namespace ns onto the master services
for _, service := range services.Items {
// ignore services where PortalIP is "None" or empty
// ignore services where ClusterIP is "None" or empty
if !api.IsServiceIPSet(&service) {
continue
}


@ -1260,7 +1260,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8081,
}},
PortalIP: "1.2.3.1",
ClusterIP: "1.2.3.1",
},
},
{
@ -1270,7 +1270,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8083,
}},
PortalIP: "1.2.3.3",
ClusterIP: "1.2.3.3",
},
},
{
@ -1280,7 +1280,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8084,
}},
PortalIP: "1.2.3.4",
ClusterIP: "1.2.3.4",
},
},
{
@ -1290,7 +1290,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8085,
}},
PortalIP: "1.2.3.5",
ClusterIP: "1.2.3.5",
},
},
{
@ -1300,7 +1300,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8085,
}},
PortalIP: "None",
ClusterIP: "None",
},
},
{
@ -1319,7 +1319,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8086,
}},
PortalIP: "1.2.3.6",
ClusterIP: "1.2.3.6",
},
},
{
@ -1329,7 +1329,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8088,
}},
PortalIP: "1.2.3.8",
ClusterIP: "1.2.3.8",
},
},
{
@ -1339,7 +1339,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8088,
}},
PortalIP: "None",
ClusterIP: "None",
},
},
{
@ -1349,7 +1349,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
Protocol: "TCP",
Port: 8088,
}},
PortalIP: "",
ClusterIP: "",
},
},
}


@ -36,21 +36,22 @@ import (
// Controller is the controller manager for the core bootstrap Kubernetes controller
// loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default"
// namespace, and provide the IP repair check on service PortalIPs
// namespace, and provide the IP repair check on service IPs
type Controller struct {
NamespaceRegistry namespace.Registry
ServiceRegistry service.Registry
ServiceIPRegistry service.RangeRegistry
EndpointRegistry endpoint.Registry
PortalNet *net.IPNet
// TODO: MasterCount is yucky
MasterCount int
ServiceClusterIPRegistry service.RangeRegistry
ServiceClusterIPInterval time.Duration
ServiceClusterIPRange *net.IPNet
ServiceNodePortRegistry service.RangeRegistry
ServiceNodePortInterval time.Duration
ServiceNodePorts util.PortRange
ServiceNodePortRange util.PortRange
PortalIPInterval time.Duration
EndpointRegistry endpoint.Registry
EndpointInterval time.Duration
PublicIP net.IP
@ -73,11 +74,11 @@ func (c *Controller) Start() {
return
}
repairPortals := servicecontroller.NewRepair(c.PortalIPInterval, c.ServiceRegistry, c.PortalNet, c.ServiceIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePorts, c.ServiceNodePortRegistry)
repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceRegistry, c.ServiceClusterIPRange, c.ServiceClusterIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePortRange, c.ServiceNodePortRegistry)
// run all of the controllers once prior to returning from Start.
if err := repairPortals.RunOnce(); err != nil {
if err := repairClusterIPs.RunOnce(); err != nil {
glog.Errorf("Unable to perform initial IP allocation check: %v", err)
}
if err := repairNodePorts.RunOnce(); err != nil {
@ -90,7 +91,7 @@ func (c *Controller) Start() {
glog.Errorf("Unable to perform initial Kubernetes RO service initialization: %v", err)
}
c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairPortals.RunUntil, repairNodePorts.RunUntil)
c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil)
c.runner.Start()
}
@ -189,7 +190,7 @@ func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP n
Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
// maintained by this code, not by the pod selector
Selector: nil,
PortalIP: serviceIP.String(),
ClusterIP: serviceIP.String(),
SessionAffinity: api.ServiceAffinityNone,
},
}


@ -85,7 +85,6 @@ type Config struct {
EventTTL time.Duration
MinionRegexp string
KubeletClient client.KubeletClient
PortalNet *net.IPNet
// allow downstream consumers to disable the core controller loops
EnableCoreControllers bool
EnableLogsSupport bool
@ -142,16 +141,19 @@ type Config struct {
// The name of the cluster.
ClusterName string
// The range of IPs to be assigned to services with type=ClusterIP or greater
ServiceClusterIPRange *net.IPNet
// The range of ports to be assigned to services with type=NodePort or greater
ServiceNodePorts util.PortRange
ServiceNodePortRange util.PortRange
}
// Master contains state for a Kubernetes cluster master/api server.
type Master struct {
// "Inputs", Copied from Config
portalNet *net.IPNet
serviceNodePorts util.PortRange
cacheTimeout time.Duration
serviceClusterIPRange *net.IPNet
serviceNodePortRange util.PortRange
cacheTimeout time.Duration
mux apiserver.Mux
muxHelper *apiserver.MuxHelper
@ -192,12 +194,12 @@ type Master struct {
// registries are internal client APIs for accessing the storage layer
// TODO: define the internal typed interface in a way that clients can
// also be replaced
nodeRegistry minion.Registry
namespaceRegistry namespace.Registry
serviceRegistry service.Registry
endpointRegistry endpoint.Registry
portalAllocator service.RangeRegistry
serviceNodePortAllocator service.RangeRegistry
nodeRegistry minion.Registry
namespaceRegistry namespace.Registry
serviceRegistry service.Registry
endpointRegistry endpoint.Registry
serviceClusterIPAllocator service.RangeRegistry
serviceNodePortAllocator service.RangeRegistry
// "Outputs"
Handler http.Handler
@ -219,26 +221,26 @@ func NewEtcdHelper(client tools.EtcdGetSet, version string, prefix string) (help
// setDefaults fills in any fields not set that are required to have valid data.
func setDefaults(c *Config) {
if c.PortalNet == nil {
if c.ServiceClusterIPRange == nil {
defaultNet := "10.0.0.0/24"
glog.Warningf("Portal net unspecified. Defaulting to %v.", defaultNet)
_, portalNet, err := net.ParseCIDR(defaultNet)
glog.Warningf("Network range for service cluster IPs is unspecified. Defaulting to %v.", defaultNet)
_, serviceClusterIPRange, err := net.ParseCIDR(defaultNet)
if err != nil {
glog.Fatalf("Unable to parse CIDR: %v", err)
}
if size := ipallocator.RangeSize(portalNet); size < 8 {
glog.Fatalf("The portal net range must be at least %d IP addresses", 8)
if size := ipallocator.RangeSize(serviceClusterIPRange); size < 8 {
glog.Fatalf("The service cluster IP range must be at least %d IP addresses", 8)
}
c.PortalNet = portalNet
c.ServiceClusterIPRange = serviceClusterIPRange
}
if c.ServiceNodePorts.Size == 0 {
if c.ServiceNodePortRange.Size == 0 {
// TODO: Currently no way to specify an empty range (do we need to allow this?)
// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
// but then that breaks the strict nestedness of ServiceType.
// Review post-v1
defaultServiceNodePorts := util.PortRange{Base: 30000, Size: 2767}
c.ServiceNodePorts = defaultServiceNodePorts
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePorts)
defaultServiceNodePortRange := util.PortRange{Base: 30000, Size: 2767}
c.ServiceNodePortRange = defaultServiceNodePortRange
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePortRange)
}
if c.MasterCount == 0 {
// Clearly, there will be at least one master.
@ -273,8 +275,8 @@ func setDefaults(c *Config) {
// New returns a new instance of Master from the given config.
// Certain config fields will be set to a default value if unset,
// including:
// PortalNet
// ServiceNodePorts
// ServiceClusterIPRange
// ServiceNodePortRange
// MasterCount
// ReadOnlyPort
// ReadWritePort
@ -301,20 +303,20 @@ func New(c *Config) *Master {
glog.Fatalf("master.New() called with config.KubeletClient == nil")
}
// Select the first two valid IPs from portalNet to use as the master service portalIPs
serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.PortalNet, 1)
// Select the first two valid IPs from serviceClusterIPRange to use as the master service IPs
serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 1)
if err != nil {
glog.Fatalf("Failed to generate service read-only IP for master service: %v", err)
}
serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.PortalNet, 2)
serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 2)
if err != nil {
glog.Fatalf("Failed to generate service read-write IP for master service: %v", err)
}
glog.V(4).Infof("Setting master service IPs based on PortalNet subnet to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP)
glog.V(4).Infof("Setting master service IPs based to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP)
m := &Master{
portalNet: c.PortalNet,
serviceNodePorts: c.ServiceNodePorts,
serviceClusterIPRange: c.ServiceClusterIPRange,
serviceNodePortRange: c.ServiceNodePortRange,
rootWebService: new(restful.WebService),
enableCoreControllers: c.EnableCoreControllers,
enableLogsSupport: c.EnableLogsSupport,
@ -440,17 +442,17 @@ func (m *Master) init(c *Config) {
registry := etcd.NewRegistry(c.EtcdHelper, podRegistry, m.endpointRegistry)
m.serviceRegistry = registry
var portalRangeRegistry service.RangeRegistry
portalAllocator := ipallocator.NewAllocatorCIDRRange(m.portalNet, func(max int, rangeSpec string) allocator.Interface {
var serviceClusterIPRegistry service.RangeRegistry
serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.EtcdHelper)
portalRangeRegistry = etcd
serviceClusterIPRegistry = etcd
return etcd
})
m.portalAllocator = portalRangeRegistry
m.serviceClusterIPAllocator = serviceClusterIPRegistry
var serviceNodePortRegistry service.RangeRegistry
serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePorts, func(max int, rangeSpec string) allocator.Interface {
serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.EtcdHelper)
serviceNodePortRegistry = etcd
@ -474,7 +476,7 @@ func (m *Master) init(c *Config) {
"podTemplates": podTemplateStorage,
"replicationControllers": controllerStorage,
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, portalAllocator, serviceNodePortAllocator, c.ClusterName),
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, c.ClusterName),
"endpoints": endpointsStorage,
"minions": nodeStorage,
"minions/status": nodeStatusStorage,
@ -612,17 +614,18 @@ func (m *Master) NewBootstrapController() *Controller {
return &Controller{
NamespaceRegistry: m.namespaceRegistry,
ServiceRegistry: m.serviceRegistry,
ServiceIPRegistry: m.portalAllocator,
EndpointRegistry: m.endpointRegistry,
PortalNet: m.portalNet,
MasterCount: m.masterCount,
ServiceNodePortRegistry: m.serviceNodePortAllocator,
ServiceNodePorts: m.serviceNodePorts,
EndpointRegistry: m.endpointRegistry,
EndpointInterval: 10 * time.Second,
ServiceClusterIPRegistry: m.serviceClusterIPAllocator,
ServiceClusterIPRange: m.serviceClusterIPRange,
ServiceClusterIPInterval: 3 * time.Minute,
ServiceNodePortRegistry: m.serviceNodePortAllocator,
ServiceNodePortRange: m.serviceNodePortRange,
ServiceNodePortInterval: 3 * time.Minute,
PortalIPInterval: 3 * time.Minute,
EndpointInterval: 10 * time.Second,
PublicIP: m.clusterIP,
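New derives the read-only and read-write master service IPs from the service cluster IP range with ipallocator.GetIndexedIP. A rough illustration of what "the Nth IP of a CIDR" means here, using only the standard library; indexedIP is a stand-in, not the ipallocator implementation:

// Illustrative sketch: compute the index'th address within a CIDR
// (index 0 is the network address, 1 and 2 are the first usable IPs).
package main

import (
	"fmt"
	"math/big"
	"net"
)

func indexedIP(cidr *net.IPNet, index int) net.IP {
	base := new(big.Int).SetBytes(cidr.IP.To16())
	sum := new(big.Int).Add(base, big.NewInt(int64(index)))
	b := sum.Bytes()
	// left-pad to 16 bytes so net.IP formats correctly
	out := make(net.IP, 16)
	copy(out[16-len(b):], b)
	return out
}

func main() {
	_, cidr, _ := net.ParseCIDR("10.0.0.0/24")
	fmt.Println(indexedIP(cidr, 1)) // e.g. 10.0.0.1 (read-only service)
	fmt.Println(indexedIP(cidr, 2)) // e.g. 10.0.0.2 (read-write service)
}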

View File

@ -33,9 +33,13 @@ import (
"github.com/golang/glog"
)
type portal struct {
ip net.IP
port int
}
type serviceInfo struct {
portalIP net.IP
portalPort int
portal portal
protocol api.Protocol
proxyPort int
socket proxySocket
@ -252,9 +256,9 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
for i := range services {
service := &services[i]
// if PortalIP is "None" or empty, skip proxying
// if ClusterIP is "None" or empty, skip proxying
if !api.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to portal IP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.PortalIP)
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.ClusterIP)
continue
}
@ -263,7 +267,7 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
serviceName := ServicePortName{types.NamespacedName{service.Namespace, service.Name}, servicePort.Name}
activeServices[serviceName] = true
serviceIP := net.ParseIP(service.Spec.PortalIP)
serviceIP := net.ParseIP(service.Spec.ClusterIP)
info, exists := proxier.getServiceInfo(serviceName)
// TODO: check health of the socket? What if ProxyLoop exited?
if exists && sameConfig(info, service, servicePort) {
@ -287,8 +291,8 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
glog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
continue
}
info.portalIP = serviceIP
info.portalPort = servicePort.Port
info.portal.ip = serviceIP
info.portal.port = servicePort.Port
info.deprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
// Deep-copy in case the service instance changes
info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
@ -321,10 +325,10 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
}
func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
if info.protocol != port.Protocol || info.portalPort != port.Port || info.nodePort != port.NodePort {
if info.protocol != port.Protocol || info.portal.port != port.Port || info.nodePort != port.NodePort {
return false
}
if !info.portalIP.Equal(net.ParseIP(service.Spec.PortalIP)) {
if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) {
return false
}
if !ipsEqual(info.deprecatedPublicIPs, service.Spec.DeprecatedPublicIPs) {
@ -352,19 +356,19 @@ func ipsEqual(lhs, rhs []string) bool {
}
func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) error {
err := proxier.openOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
err := proxier.openOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
for _, publicIP := range info.deprecatedPublicIPs {
err = proxier.openOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
}
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
err = proxier.openOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
@ -379,27 +383,27 @@ func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) e
return nil
}
func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error {
func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error {
// Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort)
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
}
// Handle traffic from the host.
args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort)
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
}
return nil
}
@ -480,13 +484,13 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyI
func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) error {
// Collect errors and report them all at the end.
el := proxier.closeOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, publicIP := range info.deprecatedPublicIPs {
el = append(el, proxier.closeOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
el = append(el, proxier.closeOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
}
if info.nodePort != 0 {
@ -500,18 +504,18 @@ func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo)
return errors.NewAggregate(el)
}
func (proxier *Proxier) closeOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error {
func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error {
el := []error{}
// Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err)
}
// Handle traffic from the host.
args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err)
@ -577,7 +581,7 @@ func iptablesInit(ipt iptables.Interface) error {
// This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
// doubly-unlikely), but we need to be careful to keep the rules in the right order.
args := []string{ /* portal_net matching could go here */ }
args = append(args, "-m", "comment", "--comment", "handle Portals; NOTE: this must be before the NodePort rules")
args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
return err
}
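One non-mechanical change in this file: the proxier's per-service cluster IP and port are now grouped into a small portal struct and compared as a unit when deciding whether a service's config changed. A toy version of that comparison; the struct mirrors the diff, the equal helper is illustrative:

// Sketch of the portal grouping introduced above.
package main

import (
	"fmt"
	"net"
)

type portal struct {
	ip   net.IP
	port int
}

func (p portal) equal(other portal) bool {
	return p.ip.Equal(other.ip) && p.port == other.port
}

func main() {
	old := portal{net.ParseIP("1.2.3.4"), 80}
	cur := portal{net.ParseIP("1.2.3.4"), 81}
	fmt.Println(old.equal(cur)) // false: the port changed, so the proxy must be rebuilt
}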

View File

@ -300,7 +300,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 80,
Protocol: "TCP",
@ -315,7 +315,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceP)
}
if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 80 || svcInfo.protocol != "TCP" {
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
}
@ -323,7 +323,7 @@ func TestMultiPortOnUpdate(t *testing.T) {
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceQ)
}
if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 81 || svcInfo.protocol != "UDP" {
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
}
@ -530,7 +530,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.proxyPort,
Protocol: "TCP",
@ -582,7 +582,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.proxyPort,
Protocol: "UDP",
@ -624,7 +624,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "TCP",
@ -671,7 +671,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "UDP",
@ -720,10 +720,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.portalPort,
Port: svcInfo.portal.port,
Protocol: "TCP",
}},
PortalIP: svcInfo.portalIP.String(),
ClusterIP: svcInfo.portal.ip.String(),
DeprecatedPublicIPs: []string{"4.3.2.1"},
},
}})
@ -769,7 +769,7 @@ func TestProxyUpdatePortal(t *testing.T) {
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.proxyPort,
Protocol: "TCP",
@ -777,12 +777,12 @@ func TestProxyUpdatePortal(t *testing.T) {
}})
_, exists := p.getServiceInfo(service)
if exists {
t.Fatalf("service with empty portalIP should not be included in the proxy")
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "None", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.proxyPort,
Protocol: "TCP",
@ -790,12 +790,12 @@ func TestProxyUpdatePortal(t *testing.T) {
}})
_, exists = p.getServiceInfo(service)
if exists {
t.Fatalf("service with 'None' as portalIP should not be included in the proxy")
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
p.OnUpdate([]api.Service{{
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: svcInfo.proxyPort,
Protocol: "TCP",
@ -803,7 +803,7 @@ func TestProxyUpdatePortal(t *testing.T) {
}})
svcInfo, exists = p.getServiceInfo(service)
if !exists {
t.Fatalf("service with portalIP set not found in the proxy")
t.Fatalf("service with ClusterIP set not found in the proxy")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)

View File

@ -27,17 +27,17 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// Repair is a controller loop that periodically examines all service PortalIP allocations
// Repair is a controller loop that periodically examines all service ClusterIP allocations
// and logs any errors, and then sets the compacted and accurate list of all allocated IPs.
//
// Handles:
// * Duplicate PortalIP assignments caused by operator action or undetected race conditions
// * PortalIPs that do not match the current portal network
// * Duplicate ClusterIP assignments caused by operator action or undetected race conditions
// * ClusterIPs that do not match the currently configured range
// * Allocations to services that were not actually created due to a crash or power loss
// * Migrates old versions of Kubernetes services into the atomic ipallocator model automatically
//
// Can be run at infrequent intervals, and is best performed on startup of the master.
// Is level driven and idempotent - all valid PortalIPs will be updated into the ipallocator
// Is level driven and idempotent - all valid ClusterIPs will be updated into the ipallocator
// map at the end of a single execution loop if no race is encountered.
//
// TODO: allocate new IPs if necessary
@ -49,7 +49,7 @@ type Repair struct {
alloc service.RangeRegistry
}
// NewRepair creates a controller that periodically ensures that all portalIPs are uniquely allocated across the cluster
// NewRepair creates a controller that periodically ensures that all clusterIPs are uniquely allocated across the cluster
// and generates informational warnings for a cluster that is not in sync.
func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.RangeRegistry) *Repair {
return &Repair{
@ -69,7 +69,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
}, c.interval, ch)
}
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs.
// RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
// or if they are executed against different leaders,
@ -94,27 +94,27 @@ func (c *Repair) RunOnce() error {
if !api.IsServiceIPSet(&svc) {
continue
}
ip := net.ParseIP(svc.Spec.PortalIP)
ip := net.ParseIP(svc.Spec.ClusterIP)
if ip == nil {
// portal IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace))
// cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.ClusterIP, svc.Name, svc.Namespace))
continue
}
switch err := r.Allocate(ip); err {
case nil:
case ipallocator.ErrAllocated:
// TODO: send event
// portal IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
// cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
case ipallocator.ErrNotInRange:
// TODO: send event
// portal IP is broken, reallocate
util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
// cluster IP is broken, reallocate
util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
case ipallocator.ErrFull:
// TODO: send event
return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services")
default:
return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
}
}
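The skip checks in this loop, like the proxier's OnUpdate above, hinge on whether a service actually has a cluster IP. A minimal sketch of that predicate, assuming the convention visible throughout these diffs: an empty string means "not allocated yet" and "None" means headless.

// Illustrative predicate only; the real check lives in the api package.
package main

import "fmt"

func isServiceIPSet(clusterIP string) bool {
	return clusterIP != "" && clusterIP != "None"
}

func main() {
	for _, ip := range []string{"", "None", "10.0.0.10"} {
		fmt.Printf("%-12q -> %v\n", ip, isServiceIPSet(ip))
	}
}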

View File

@ -121,22 +121,22 @@ func TestRepairWithExisting(t *testing.T) {
registry.List = api.ServiceList{
Items: []api.Service{
{
Spec: api.ServiceSpec{PortalIP: "192.168.1.1"},
Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
},
{
Spec: api.ServiceSpec{PortalIP: "192.168.1.100"},
Spec: api.ServiceSpec{ClusterIP: "192.168.1.100"},
},
{ // outside CIDR, will be dropped
Spec: api.ServiceSpec{PortalIP: "192.168.0.1"},
Spec: api.ServiceSpec{ClusterIP: "192.168.0.1"},
},
{ // empty, ignored
Spec: api.ServiceSpec{PortalIP: ""},
Spec: api.ServiceSpec{ClusterIP: ""},
},
{ // duplicate, dropped
Spec: api.ServiceSpec{PortalIP: "192.168.1.1"},
Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
},
{ // headless
Spec: api.ServiceSpec{PortalIP: "None"},
Spec: api.ServiceSpec{ClusterIP: "None"},
},
},
}

View File

@ -46,19 +46,19 @@ type REST struct {
registry Registry
machines minion.Registry
endpoints endpoint.Registry
portals ipallocator.Interface
serviceIPs ipallocator.Interface
serviceNodePorts portallocator.Interface
clusterName string
}
// NewStorage returns a new REST.
func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, portals ipallocator.Interface,
func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, serviceIPs ipallocator.Interface,
serviceNodePorts portallocator.Interface, clusterName string) *REST {
return &REST{
registry: registry,
machines: machines,
endpoints: endpoints,
portals: portals,
serviceIPs: serviceIPs,
serviceNodePorts: serviceNodePorts,
clusterName: clusterName,
}
@ -75,7 +75,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
defer func() {
if releaseServiceIP {
if api.IsServiceIPSet(service) {
rs.portals.Release(net.ParseIP(service.Spec.PortalIP))
rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
}
}
}()
@ -85,17 +85,17 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
if api.IsServiceIPRequested(service) {
// Allocate next available.
ip, err := rs.portals.AllocateNext()
ip, err := rs.serviceIPs.AllocateNext()
if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())}
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
return nil, errors.NewInvalid("Service", service.Name, el)
}
service.Spec.PortalIP = ip.String()
service.Spec.ClusterIP = ip.String()
releaseServiceIP = true
} else if api.IsServiceIPSet(service) {
// Try to respect the requested IP.
if err := rs.portals.Allocate(net.ParseIP(service.Spec.PortalIP)); err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())}
if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
return nil, errors.NewInvalid("Service", service.Name, el)
}
releaseServiceIP = true
@ -150,7 +150,7 @@ func (rs *REST) Delete(ctx api.Context, id string) (runtime.Object, error) {
}
if api.IsServiceIPSet(service) {
rs.portals.Release(net.ParseIP(service.Spec.PortalIP))
rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
}
for _, nodePort := range CollectServiceNodePorts(service) {

View File

@ -96,8 +96,8 @@ func TestServiceRegistryCreate(t *testing.T) {
if created_service.CreationTimestamp.IsZero() {
t.Errorf("Expected timestamp to be set, got: %v", created_service.CreationTimestamp)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
}
srv, err := registry.GetService(ctx, svc.Name)
if err != nil {
@ -517,8 +517,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if created_service_1.Name != "foo" {
t.Errorf("Expected foo, but got %v", created_service_1.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP)
}
svc2 := &api.Service{
@ -538,14 +538,14 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if created_service_2.Name != "bar" {
t.Errorf("Expected bar, but got %v", created_service_2.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP)
}
testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"}
testIP := ""
for _, ip := range testIPs {
if !rest.portals.(*ipallocator.Range).Has(net.ParseIP(ip)) {
if !rest.serviceIPs.(*ipallocator.Range).Has(net.ParseIP(ip)) {
testIP = ip
}
}
@ -554,7 +554,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "quux"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: testIP,
ClusterIP: testIP,
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
@ -569,8 +569,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
t.Fatal(err)
}
created_service_3 := created_svc3.(*api.Service)
if created_service_3.Spec.PortalIP != testIP { // specific IP
t.Errorf("Unexpected PortalIP: %s", created_service_3.Spec.PortalIP)
if created_service_3.Spec.ClusterIP != testIP { // specific IP
t.Errorf("Unexpected ClusterIP: %s", created_service_3.Spec.ClusterIP)
}
}
@ -595,8 +595,8 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if created_service_1.Name != "foo" {
t.Errorf("Expected foo, but got %v", created_service_1.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP)
}
_, err := rest.Delete(ctx, created_service_1.Name)
@ -622,8 +622,8 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if created_service_2.Name != "bar" {
t.Errorf("Expected bar, but got %v", created_service_2.Name)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP)
}
}
@ -648,8 +648,8 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
if created_service.Spec.Ports[0].Port != 6502 {
t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
}
update := deepCloneService(created_service)
@ -663,7 +663,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
update = deepCloneService(created_service)
update.Spec.Ports[0].Port = 6503
update.Spec.PortalIP = "1.2.3.76" // error
update.Spec.ClusterIP = "1.2.3.76" // error
_, _, err := rest.Update(ctx, update)
if err == nil || !errors.IsInvalid(err) {
@ -692,8 +692,8 @@ func TestServiceRegistryIPLoadBalancer(t *testing.T) {
if created_service.Spec.Ports[0].Port != 6502 {
t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port)
}
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) {
t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP)
if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) {
t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP)
}
update := deepCloneService(created_service)
@ -750,7 +750,7 @@ func TestCreate(t *testing.T) {
&api.Service{
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "None",
ClusterIP: "None",
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
@ -767,7 +767,7 @@ func TestCreate(t *testing.T) {
&api.Service{
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
PortalIP: "invalid",
ClusterIP: "invalid",
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{

View File

@ -428,7 +428,7 @@ func chooseHostInterfaceNativeGo() (net.IP, error) {
if ip == nil {
return nil, fmt.Errorf("no acceptable interface from host")
}
glog.V(4).Infof("Choosing interface %s for from-host portals", intfs[i].Name)
glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip)
return ip, nil
}
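For context, the function whose log message changes here walks the host's interfaces and returns the first usable address. A simplified, standard-library-only sketch of that selection; the real code applies stricter ordering and error handling:

// Illustrative only: pick the first up, non-loopback, global-unicast IPv4 address.
package main

import (
	"fmt"
	"net"
)

func chooseHostIP() (net.IP, error) {
	intfs, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	for _, intf := range intfs {
		if intf.Flags&net.FlagUp == 0 || intf.Flags&net.FlagLoopback != 0 {
			continue // skip interfaces that are down or loopback
		}
		addrs, err := intf.Addrs()
		if err != nil {
			continue
		}
		for _, a := range addrs {
			if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.To4() != nil && ipnet.IP.IsGlobalUnicast() {
				return ipnet.IP, nil
			}
		}
	}
	return nil, fmt.Errorf("no acceptable interface from host")
}

func main() {
	fmt.Println(chooseHostIP())
}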

View File

@ -209,7 +209,7 @@ var _ = Describe("DNS", func() {
Name: testServiceName,
},
Spec: api.ServiceSpec{
PortalIP: "None",
ClusterIP: "None",
Ports: []api.ServicePort{
{Port: 80},
},