Update OpenShift from 1.3.0-alpha.3 to 1.5.0-alpha.3

This is primarily to get the signature access extension to the
docker/distribution API.

To make it work, two updates to the test harness are necessary:

- Change the expected output of (oadm policy add-cluster-role-to-group)
- Don't expect (openshift start master) to create .kubeconfig files
  for the registry service.

  As of https://github.com/openshift/origin/pull/10830 ,
  openshift.local.config/master/openshift-registry.kubeconfig is no longer
  autogenerated.  Instead, do what (oadm registry) does, creating a
  service account and a cluster policy role binding.  Then manually create
  the necessary certificates and a .kubeconfig instead of using the
  service account in a pod.
Miloslav Trmač 2017-03-15 17:32:08 +01:00
parent 9d88725a97
commit 96e579720e
2 changed files with 27 additions and 5 deletions
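
The signature access extension mentioned in the message above lets clients read
and write image signatures through the registry's HTTP API. Below is a minimal
sketch of reading signatures, assuming the extension is served at
/extensions/v2/<repo>/signatures/<digest>; the path, repository name, and
digest are illustrative assumptions, not taken from this commit:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical repository and digest, for illustration only.
	digest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
	url := fmt.Sprintf("http://127.0.0.1:5000/extensions/v2/%s/signatures/%s",
		"myproject/busybox", digest)
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("HTTP %d\n%s\n", resp.StatusCode, body)
}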


@@ -46,7 +46,7 @@ RUN set -x \
 RUN set -x \
 	&& yum install -y which git tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
 	&& export GOPATH=$(mktemp -d) \
-	&& git clone -b v1.3.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
+	&& git clone -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
 	&& (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
 	&& cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
 	&& cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \


@@ -126,13 +126,35 @@ func (c *openshiftCluster) startMaster() {
 // startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure.
 func (c *openshiftCluster) startRegistry() {
-	//KUBECONFIG=openshift.local.config/master/openshift-registry.kubeconfig DOCKER_REGISTRY_URL=127.0.0.1:5000
+	// This partially mimics the objects created by (oadm registry), except that we run the
+	// server directly as an ordinary process instead of a pod with an implicitly attached service account.
+	saJSON := `{
+		"apiVersion": "v1",
+		"kind": "ServiceAccount",
+		"metadata": {
+			"name": "registry"
+		}
+	}`
+	cmd := c.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-")
+	runExecCmdWithInput(c.c, cmd, saJSON)
+	cmd = c.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry")
+	out, err := cmd.CombinedOutput()
+	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
+	c.c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n")
+	cmd = c.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry")
+	out, err = cmd.CombinedOutput()
+	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
+	c.c.Assert(string(out), check.Equals, "")
+	//KUBECONFIG=openshift.local.registry/openshift-registry.kubeconfig DOCKER_REGISTRY_URL=127.0.0.1:5000
 	c.registry = c.clusterCmd(map[string]string{
-		"KUBECONFIG":          "openshift.local.config/master/openshift-registry.kubeconfig",
+		"KUBECONFIG":          "openshift.local.registry/openshift-registry.kubeconfig",
 		"DOCKER_REGISTRY_URL": "127.0.0.1:5000",
 	}, "dockerregistry", "/atomic-registry-config.yml")
 	consumeAndLogOutputs(c.c, "registry", c.registry)
-	err := c.registry.Start()
+	err = c.registry.Start()
 	c.c.Assert(err, check.IsNil)
 	portOpen, terminatePortCheck := newPortChecker(c.c, 5000)
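
The clusterCmd and runExecCmdWithInput helpers used above are part of the
existing harness, not of this diff; a minimal sketch of what they plausibly
look like, including an assumed adminKUBECONFIG pointing at the cluster-admin
credentials generated by (openshift start master):

package harness

import (
	"os"
	"os/exec"
	"strings"

	"github.com/go-check/check"
)

// Assumption: the admin kubeconfig lives at the default path generated by
// (openshift start master); the exact path is a guess.
var adminKUBECONFIG = map[string]string{
	"KUBECONFIG": "openshift.local.config/master/admin.kubeconfig",
}

// Stub of the harness type, reduced to what this sketch needs.
type openshiftCluster struct {
	c        *check.C
	registry *exec.Cmd
}

// clusterCmd builds an *exec.Cmd running (name args...) with the given
// extra environment variables appended to the current environment.
func (c *openshiftCluster) clusterCmd(env map[string]string, name string, args ...string) *exec.Cmd {
	cmd := exec.Command(name, args...)
	cmd.Env = os.Environ()
	for k, v := range env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	return cmd
}

// runExecCmdWithInput runs cmd with input fed to its stdin, failing the
// test on any error.
func runExecCmdWithInput(c *check.C, cmd *exec.Cmd, input string) {
	cmd.Stdin = strings.NewReader(input)
	out, err := cmd.CombinedOutput()
	c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
}
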
@@ -185,7 +207,7 @@ func (c *openshiftCluster) relaxImageSignerPermissions() {
 	cmd := c.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
 	out, err := cmd.CombinedOutput()
 	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
-	c.c.Assert(string(out), check.Equals, "")
+	c.c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n")
 }
 
 // tearDown stops the cluster services and deletes (only some!) of the state.
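
The expected string above is tied to the v1.5.0-alpha.3 CLI, which prints a
confirmation where the old CLI printed nothing. If the harness ever needed to
pass against both versions, a tolerant assertion could look like this sketch
(not what this commit does):

package harness

import "github.com/go-check/check"

// assertRoleAddOutput accepts either the old (silent) or the new (confirming)
// output of (oadm policy add-cluster-role-to-group); anything else fails.
func assertRoleAddOutput(c *check.C, out []byte) {
	switch string(out) {
	case "", // v1.3.0-alpha.3 printed nothing on success
		"cluster role \"system:image-signer\" added: \"system:authenticated\"\n": // v1.5.0-alpha.3
		// expected
	default:
		c.Fatalf("unexpected (oadm policy) output %q", string(out))
	}
}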