Replace gopkg.in/check.v1 with github.com/stretchr/testify/suite

gopkg.in/check.v1 hasn't had any commits since Nov 2020. That's not
an immediate issue for a test-only dependency, but because it hides
access to the standard library's *testing.T, it will eventually
become limiting.

Also, using the same framework for unit and integration tests
seems practical.

This is mostly a batch copy&paste job, with a fairly high risk
of unexpected breakage.

Also, I didn't spend much time choosing between assert.* and
require.*; we can tune that as failures show up.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač
2023-02-15 23:39:03 +01:00
parent c0c7065737
commit 2ef9cf6902
45 changed files with 1389 additions and 4507 deletions
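
The change itself is almost entirely mechanical. Below is a minimal sketch of the before/after shape it applies at each call site; the test name, fixture path, and contents are illustrative only, not taken from this commit:

// Illustrative only: a hypothetical test showing the check.v1 -> testify
// mapping applied throughout this commit. The fixture is made up.
package example

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConversionSketch(t *testing.T) {
    dir := t.TempDir() // was: c.MkDir()

    path := filepath.Join(dir, "registry-config.yml")
    err := os.WriteFile(path, []byte("acceptschema2: false\n"), 0o600)
    require.NoError(t, err) // was: c.Assert(err, check.IsNil)

    out, err := os.ReadFile(path)
    require.NoError(t, err)

    // was: c.Assert(string(out), check.Matches, "(?s).*acceptschema2: false.*")
    require.Regexp(t, "(?s).*acceptschema2: false.*", string(out))

    // was: c.Assert(string(out), check.Equals, "acceptschema2: false\n")
    assert.Equal(t, "acceptschema2: false\n", string(out))
}

The one judgment call per call site is require.* (abort the test immediately) versus assert.* (record the failure and continue); as the message says, that choice is left to be tuned as failures show up.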

@@ -9,10 +9,11 @@ import (
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/containers/storage/pkg/homedir"
"gopkg.in/check.v1"
"github.com/stretchr/testify/require"
)
var adminKUBECONFIG = map[string]string{
@@ -30,16 +31,16 @@ type openshiftCluster struct {
 // startOpenshiftCluster creates a new openshiftCluster.
 // WARNING: This affects state in users' home directory! Only run
 // in isolated test environment.
-func startOpenshiftCluster(c *check.C) *openshiftCluster {
+func startOpenshiftCluster(t *testing.T) *openshiftCluster {
     cluster := &openshiftCluster{}
-    cluster.workingDir = c.MkDir()
+    cluster.workingDir = t.TempDir()

-    cluster.startMaster(c)
-    cluster.prepareRegistryConfig(c)
-    cluster.startRegistry(c)
-    cluster.ocLoginToProject(c)
-    cluster.dockerLogin(c)
-    cluster.relaxImageSignerPermissions(c)
+    cluster.startMaster(t)
+    cluster.prepareRegistryConfig(t)
+    cluster.startRegistry(t)
+    cluster.ocLoginToProject(t)
+    cluster.dockerLogin(t)
+    cluster.relaxImageSignerPermissions(t)
     return cluster
 }
@@ -56,21 +57,20 @@ func (cluster *openshiftCluster) clusterCmd(env map[string]string, name string,
 }

 // startMaster starts the OpenShift master (etcd+API server) and waits for it to be ready, or terminates on failure.
-func (cluster *openshiftCluster) startMaster(c *check.C) {
+func (cluster *openshiftCluster) startMaster(t *testing.T) {
     cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
     cluster.processes = append(cluster.processes, cmd)
     stdout, err := cmd.StdoutPipe()
-    c.Assert(err, check.IsNil)
+    require.NoError(t, err)
     // Send both to the same pipe. This might cause the two streams to be mixed up,
     // but logging actually goes only to stderr - this primarily ensure we log any
     // unexpected output to stdout.
     cmd.Stderr = cmd.Stdout
     err = cmd.Start()
-    c.Assert(err, check.IsNil)
+    require.NoError(t, err)

-    portOpen, terminatePortCheck := newPortChecker(c, 8443)
+    portOpen, terminatePortCheck := newPortChecker(t, 8443)
     defer func() {
-        c.Logf("Terminating port check")
+        t.Logf("Terminating port check")
         terminatePortCheck <- true
     }()
@@ -78,12 +78,12 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
     logCheckFound := make(chan bool)
     go func() {
         defer func() {
-            c.Logf("Log checker exiting")
+            t.Logf("Log checker exiting")
         }()
         scanner := bufio.NewScanner(stdout)
         for scanner.Scan() {
             line := scanner.Text()
-            c.Logf("Log line: %s", line)
+            t.Logf("Log line: %s", line)
             if strings.Contains(line, "Started Origin Controllers") {
                 logCheckFound <- true
                 return
@@ -92,7 +92,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
             // Note: we can block before we get here.
             select {
             case <-terminateLogCheck:
-                c.Logf("terminated")
+                t.Logf("terminated")
                 return
             default:
                 // Do not block here and read the next line.
@@ -101,7 +101,7 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
         logCheckFound <- false
     }()
     defer func() {
-        c.Logf("Terminating log check")
+        t.Logf("Terminating log check")
         terminateLogCheck <- true
     }()
@@ -110,26 +110,26 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
     ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
     defer cancel()
     for !gotPortCheck || !gotLogCheck {
-        c.Logf("Waiting for master")
+        t.Logf("Waiting for master")
         select {
         case <-portOpen:
-            c.Logf("port check done")
+            t.Logf("port check done")
             gotPortCheck = true
         case found := <-logCheckFound:
-            c.Logf("log check done, found: %t", found)
+            t.Logf("log check done, found: %t", found)
             if !found {
-                c.Fatal("log check done, success message not found")
+                t.Fatal("log check done, success message not found")
             }
             gotLogCheck = true
         case <-ctx.Done():
-            c.Fatalf("Timed out waiting for master: %v", ctx.Err())
+            t.Fatalf("Timed out waiting for master: %v", ctx.Err())
         }
     }
-    c.Logf("OK, master started!")
+    t.Logf("OK, master started!")
 }

 // prepareRegistryConfig creates a registry service account and a related k8s client configuration in ${cluster.workingDir}/openshift.local.registry.
-func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) {
+func (cluster *openshiftCluster) prepareRegistryConfig(t *testing.T) {
     // This partially mimics the objects created by (oadm registry), except that we run the
     // server directly as an ordinary process instead of a pod with an implicitly attached service account.
     saJSON := `{
@@ -140,93 +140,93 @@ func (cluster *openshiftCluster) prepareRegistryConfig(c *check.C) {
         }
     }`
     cmd := cluster.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-")
-    runExecCmdWithInput(c, cmd, saJSON)
+    runExecCmdWithInput(t, cmd, saJSON)

     cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry")
     out, err := cmd.CombinedOutput()
-    c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
-    c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n")
+    require.NoError(t, err, "%s", string(out))
+    require.Equal(t, "cluster role \"system:registry\" added: \"registry\"\n", string(out))

     cmd = cluster.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry")
     out, err = cmd.CombinedOutput()
-    c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
-    c.Assert(string(out), check.Equals, "")
+    require.NoError(t, err, "%s", string(out))
+    require.Equal(t, "", string(out))
 }

 // startRegistry starts the OpenShift registry with configPart on port, waits for it to be ready, and returns the process object, or terminates on failure.
-func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port uint16, configPath string) *exec.Cmd {
+func (cluster *openshiftCluster) startRegistryProcess(t *testing.T, port uint16, configPath string) *exec.Cmd {
     cmd := cluster.clusterCmd(map[string]string{
         "KUBECONFIG":          "openshift.local.registry/openshift-registry.kubeconfig",
         "DOCKER_REGISTRY_URL": fmt.Sprintf("127.0.0.1:%d", port),
     }, "dockerregistry", configPath)
-    consumeAndLogOutputs(c, fmt.Sprintf("registry-%d", port), cmd)
+    consumeAndLogOutputs(t, fmt.Sprintf("registry-%d", port), cmd)
     err := cmd.Start()
-    c.Assert(err, check.IsNil)
+    require.NoError(t, err, "%s")

-    portOpen, terminatePortCheck := newPortChecker(c, port)
+    portOpen, terminatePortCheck := newPortChecker(t, port)
     defer func() {
         terminatePortCheck <- true
     }()

-    c.Logf("Waiting for registry to start")
+    t.Logf("Waiting for registry to start")
     ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
     defer cancel()
     select {
     case <-portOpen:
-        c.Logf("OK, Registry port open")
+        t.Logf("OK, Registry port open")
     case <-ctx.Done():
-        c.Fatalf("Timed out waiting for registry to start: %v", ctx.Err())
+        t.Fatalf("Timed out waiting for registry to start: %v", ctx.Err())
     }

     return cmd
 }

 // startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure.
-func (cluster *openshiftCluster) startRegistry(c *check.C) {
+func (cluster *openshiftCluster) startRegistry(t *testing.T) {
     // Our “primary” registry
-    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5000, "/atomic-registry-config.yml"))
+    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5000, "/atomic-registry-config.yml"))

     // A registry configured with acceptschema2:false
-    schema1Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
+    schema1Config := fileFromFixture(t, "/atomic-registry-config.yml", map[string]string{
         "addr: :5000":              "addr: :5005",
         "rootdirectory: /registry": "rootdirectory: /registry-schema1",
         // The default configuration currently already contains acceptschema2: false
     })
     // Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
     configContents, err := os.ReadFile(schema1Config)
-    c.Assert(err, check.IsNil)
-    c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
-    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
+    require.NoError(t, err)
+    require.Regexp(t, "(?s).*acceptschema2: false.*", string(configContents))
+    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5005, schema1Config))

     // A registry configured with acceptschema2:true
-    schema2Config := fileFromFixture(c, "/atomic-registry-config.yml", map[string]string{
+    schema2Config := fileFromFixture(t, "/atomic-registry-config.yml", map[string]string{
         "addr: :5000":              "addr: :5006",
         "rootdirectory: /registry": "rootdirectory: /registry-schema2",
         "acceptschema2: false":     "acceptschema2: true",
     })
-    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5006, schema2Config))
+    cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5006, schema2Config))
 }

 // ocLogin runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
-func (cluster *openshiftCluster) ocLoginToProject(c *check.C) {
-    c.Logf("oc login")
+func (cluster *openshiftCluster) ocLoginToProject(t *testing.T) {
+    t.Logf("oc login")
     cmd := cluster.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")
     out, err := cmd.CombinedOutput()
-    c.Assert(err, check.IsNil, check.Commentf("%s", out))
-    c.Assert(string(out), check.Matches, "(?s).*Login successful.*") // (?s) : '.' will also match newlines
+    require.NoError(t, err, "%s", out)
+    require.Regexp(t, "(?s).*Login successful.*", string(out)) // (?s) : '.' will also match newlines

-    outString := combinedOutputOfCommand(c, "oc", "new-project", "myns")
-    c.Assert(outString, check.Matches, `(?s).*Now using project "myns".*`) // (?s) : '.' will also match newlines
+    outString := combinedOutputOfCommand(t, "oc", "new-project", "myns")
+    require.Regexp(t, `(?s).*Now using project "myns".*`, outString) // (?s) : '.' will also match newlines
 }

 // dockerLogin simulates (docker login) to the cluster, or terminates on failure.
 // We do not run (docker login) directly, because that requires a running daemon and a docker package.
-func (cluster *openshiftCluster) dockerLogin(c *check.C) {
+func (cluster *openshiftCluster) dockerLogin(t *testing.T) {
     cluster.dockerDir = filepath.Join(homedir.Get(), ".docker")
     err := os.Mkdir(cluster.dockerDir, 0700)
-    c.Assert(err, check.IsNil)
+    require.NoError(t, err)

-    out := combinedOutputOfCommand(c, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}")
-    c.Logf("oc config value: %s", out)
+    out := combinedOutputOfCommand(t, "oc", "config", "view", "-o", "json", "-o", "jsonpath={.users[*].user.token}")
+    t.Logf("oc config value: %s", out)
     authValue := base64.StdEncoding.EncodeToString([]byte("unused:" + out))
     auths := []string{}
     for _, port := range []int{5000, 5005, 5006} {
@@ -237,22 +237,22 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) {
     }
     configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
     err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
-    c.Assert(err, check.IsNil)
+    require.NoError(t, err)
 }

 // relaxImageSignerPermissions opens up the system:image-signer permissions so that
 // anyone can work with signatures
 // FIXME: This also allows anyone to DoS anyone else; this design is really not all
 // that workable, but it is the best we can do for now.
-func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
+func (cluster *openshiftCluster) relaxImageSignerPermissions(t *testing.T) {
     cmd := cluster.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
     out, err := cmd.CombinedOutput()
-    c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
-    c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n")
+    require.NoError(t, err, "%s", string(out))
+    require.Equal(t, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n", string(out))
 }

 // tearDown stops the cluster services and deletes (only some!) of the state.
-func (cluster *openshiftCluster) tearDown(c *check.C) {
+func (cluster *openshiftCluster) tearDown(t *testing.T) {
     for i := len(cluster.processes) - 1; i >= 0; i-- {
         // It's undocumented what Kill() returns if the process has terminated,
         // so we couldn't check just for that. This is running in a container anyway…
@@ -260,6 +260,6 @@ func (cluster *openshiftCluster) tearDown(c *check.C) {
     }
     if cluster.dockerDir != "" {
         err := os.RemoveAll(cluster.dockerDir)
-        c.Assert(err, check.IsNil)
+        require.NoError(t, err)
     }
 }
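
This file uses only require, but the commit title also names the suite package, which wraps the same assertions in an xUnit-style suite with per-test setup. A minimal sketch of how such a suite hooks into go test; the type and test names are illustrative only, not code from this commit:

// Illustrative only: a minimal testify suite, not taken from this commit.
package example

import (
    "testing"

    "github.com/stretchr/testify/suite"
)

type exampleSuite struct {
    suite.Suite
    workingDir string
}

// SetupTest runs before every TestXxx method, much like check.v1's SetUpTest.
func (s *exampleSuite) SetupTest() {
    s.workingDir = s.T().TempDir() // s.T() exposes the raw *testing.T
}

func (s *exampleSuite) TestWorkingDirExists() {
    s.Require().DirExists(s.workingDir)
}

// TestExampleSuite hooks the suite into the ordinary go test runner.
func TestExampleSuite(t *testing.T) {
    suite.Run(t, &exampleSuite{})
}

Because suite methods can always reach s.T(), the *testing.T access that the commit message describes check.v1 as hiding stays available.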