Add e2e test for CockroachDB statefulset

Janet Kuo 2016-11-14 17:42:04 -08:00
parent c5c461df38
commit 2079a52950
3 changed files with 256 additions and 53 deletions

@ -54,6 +54,7 @@ const (
zookeeperManifestPath = "test/e2e/testing-manifests/petset/zookeeper"
mysqlGaleraManifestPath = "test/e2e/testing-manifests/petset/mysql-galera"
redisManifestPath = "test/e2e/testing-manifests/petset/redis"
cockroachDBManifestPath = "test/e2e/testing-manifests/petset/cockroachdb"
// Should the test restart statefulset clusters?
// TODO: enable when we've productionized bringup of pets in this e2e.
restartCluster = false
@ -174,6 +175,14 @@ var _ = framework.KubeDescribe("StatefulSet [Slow] [Feature:PetSet]", func() {
})
framework.KubeDescribe("Deploy clustered applications [Slow] [Feature:PetSet]", func() {
var pst *statefulSetTester
var appTester *clusterAppTester
BeforeEach(func() {
pst = &statefulSetTester{c: c}
appTester = &clusterAppTester{tester: pst, ns: ns}
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
dumpDebugInfo(c, ns)
@ -183,66 +192,23 @@ var _ = framework.KubeDescribe("StatefulSet [Slow] [Feature:PetSet]", func() {
})
It("should creating a working zookeeper cluster [Feature:PetSet]", func() {
pst := &statefulSetTester{c: c}
pet := &zookeeperTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
if restartCluster {
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
}
By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(pet, 2, "foo", "bar"); err != nil {
framework.Failf("%v", err)
}
appTester.pet = &zookeeperTester{tester: pst}
appTester.run()
})
It("should creating a working redis cluster [Feature:PetSet]", func() {
pst := &statefulSetTester{c: c}
pet := &redisTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
if restartCluster {
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
}
By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(pet, 2, "foo", "bar"); err != nil {
framework.Failf("%v", err)
}
appTester.pet = &redisTester{tester: pst}
appTester.run()
})
It("should creating a working mysql cluster [Feature:PetSet]", func() {
pst := &statefulSetTester{c: c}
pet := &mysqlGaleraTester{tester: pst}
By("Deploying " + pet.name())
ps := pet.deploy(ns)
appTester.pet = &mysqlGaleraTester{tester: pst}
appTester.run()
})
By("Creating foo:bar in member with index 0")
pet.write(0, map[string]string{"foo": "bar"})
if restartCluster {
By("Restarting pet set " + ps.Name)
pst.restart(ps)
pst.waitForRunning(ps.Spec.Replicas, ps)
}
By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(pet, 2, "foo", "bar"); err != nil {
framework.Failf("%v", err)
}
It("should creating a working CockroachDB cluster [Feature:PetSet]", func() {
appTester.pet = &cockroachDBTester{tester: pst}
appTester.run()
})
})
})
@ -392,6 +358,31 @@ type petTester interface {
name() string
}
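Only the name() method is visible in this hunk; judging from how clusterAppTester and the concrete testers use it (deploy, write, and the read consumed by pollReadWithTimeout), the full interface presumably looks roughly like this sketch:

type petTester interface {
	deploy(ns string) *apps.StatefulSet
	write(petIndex int, kv map[string]string)
	read(petIndex int, key string) string
	name() string
}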
type clusterAppTester struct {
ns string
pet petTester
tester *statefulSetTester
}
func (c *clusterAppTester) run() {
By("Deploying " + c.pet.name())
ps := c.pet.deploy(c.ns)
By("Creating foo:bar in member with index 0")
c.pet.write(0, map[string]string{"foo": "bar"})
if restartCluster {
By("Restarting stateful set " + ps.Name)
c.tester.restart(ps)
c.tester.waitForRunning(ps.Spec.Replicas, ps)
}
By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(c.pet, 2, "foo", "bar"); err != nil {
framework.Failf("%v", err)
}
}
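pollReadWithTimeout is not part of this diff; a minimal sketch of what it presumably does, assuming the wait utility package and made-up interval/timeout values:

func pollReadWithTimeout(pet petTester, petIndex int, key, expectedVal string) error {
	// Poll until the value under key, read from the member with the given
	// index, matches expectedVal; the interval and timeout are hypothetical.
	err := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		val := pet.read(petIndex, key)
		if val == "" {
			// Value not replicated to this member yet; keep polling.
			return false, nil
		}
		if val != expectedVal {
			return false, fmt.Errorf("expected value %q under key %q, found %q", expectedVal, key, val)
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("timed out reading key %q from member %d", key, petIndex)
	}
	return err
}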
type zookeeperTester struct {
ps *apps.StatefulSet
tester *statefulSetTester
@ -496,6 +487,44 @@ func (m *redisTester) read(petIndex int, key string) string {
return lastLine(m.redisExec(fmt.Sprintf("GET %v", key), m.ps.Namespace, name))
}
type cockroachDBTester struct {
ps *apps.StatefulSet
tester *statefulSetTester
}
func (c *cockroachDBTester) name() string {
return "CockroachDB"
}
func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
cmd = fmt.Sprintf("/cockroach/cockroach sql --host %s.cockroachdb -e \"%v\"", podName, cmd)
return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
}
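For illustration, a call such as c.cockroachDBExec("SELECT 1;", "e2e-tests-statefulset", "cockroachdb-0") (namespace and pod name made up here) is roughly equivalent to running

kubectl --namespace=e2e-tests-statefulset exec cockroachdb-0 -- /bin/sh -c '/cockroach/cockroach sql --host cockroachdb-0.cockroachdb -e "SELECT 1;"'

where cockroachdb-0.cockroachdb resolves via the headless service defined in the manifest below.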
func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet {
c.ps = c.tester.createStatefulSet(cockroachDBManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", c.ps.Name)
for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;",
"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
} {
framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ps.Name)))
}
return c.ps
}
func (c *cockroachDBTester) write(petIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", c.ps.Name, petIndex)
for k, v := range kv {
cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
framework.Logf(c.cockroachDBExec(cmd, c.ps.Namespace, name))
}
}
func (c *cockroachDBTester) read(petIndex int, key string) string {
name := fmt.Sprintf("%v-%d", c.ps.Name, petIndex)
return lastLine(c.cockroachDBExec(fmt.Sprintf("SELECT v FROM foo.bar WHERE k='%v';", key), c.ps.Namespace, name))
}
func lastLine(out string) string {
outLines := strings.Split(strings.Trim(out, "\n"), "\n")
return outLines[len(outLines)-1]
}

@ -0,0 +1,119 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
template:
metadata:
labels:
app: cockroachdb
annotations:
pod.alpha.kubernetes.io/initialized: "true"
# An init container runs only once in the lifetime of a pod, before
# the pod is started up for the first time, and has to exit successfully
# before the pod's main containers are allowed to start.
# This particular init container does a DNS lookup for other pods in
# the set to help determine whether or not a cluster already exists.
# If any other pods exist, it creates a file in the cockroach-data
# directory to pass that information along to the primary container that
# has to decide what command-line flags to use when starting CockroachDB.
# This only matters when a pod's persistent volume is empty - if it has
# data from a previous execution, that data will always be used.
pod.alpha.kubernetes.io/init-containers: '[
{
"name": "bootstrap",
"image": "cockroachdb/cockroach-k8s-init:0.1",
"args": [
"-on-start=/on-start.sh",
"-service=cockroachdb"
],
"env": [
{
"name": "POD_NAMESPACE",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
}
}
}
],
"volumeMounts": [
{
"name": "datadir",
"mountPath": "/cockroach/cockroach-data"
}
]
}
]'
spec:
containers:
- name: cockroachdb
# Runs the master branch. Not recommended for production, but since
# CockroachDB is in Beta, you don't want to run it in production
# anyway. See
# https://hub.docker.com/r/cockroachdb/cockroach/tags/
# if you prefer to run a beta release.
image: cockroachdb/cockroach
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
livenessProbe:
httpGet:
path: /_admin/v1/health
port: http
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /_admin/v1/health
port: http
initialDelaySeconds: 10
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
command:
- "/bin/bash"
- "-ecx"
- |
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
# We only want to initialize a new cluster (by omitting the join flag)
# if we're sure that we're the first node (i.e. index 0) and that
# there aren't any other nodes running as part of the cluster that
# this is supposed to be a part of (which indicates that a cluster
# already exists and we should make sure not to create a new one).
# It's fine to run without --join on a restart if there aren't any
# other nodes.
if [ ! "$(hostname)" == "cockroachdb-0" ] || \
[ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
then
# We don't join cockroachdb in order to avoid a node attempting
# to join itself, which currently doesn't work
# (https://github.com/cockroachdb/cockroach/issues/9625).
CRARGS+=("--join" "cockroachdb-public")
fi
exec /cockroach/cockroach ${CRARGS[*]}
# No pre-stop hook is required; a SIGTERM plus some time is all that's
# needed for the graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
volumeClaimTemplates:
- metadata:
name: datadir
annotations:
volume.alpha.kubernetes.io/storage-class: anything
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi

@ -0,0 +1,55 @@
apiVersion: v1
kind: Service
metadata:
# This service is meant to be used by clients of the database. It exposes a ClusterIP that will
# automatically load balance connections to the different database pods.
name: cockroachdb-public
labels:
app: cockroachdb
spec:
ports:
# The main port, served by gRPC, serves Postgres-flavor SQL, internode
# traffic, and the CLI.
- port: 26257
targetPort: 26257
name: grpc
# The secondary port serves the UI as well as health and debug endpoints.
- port: 8080
targetPort: 8080
name: http
selector:
app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
labels:
app: cockroachdb
annotations:
# This is needed to make the peer-finder work properly and to help avoid
# edge cases where instance 0 comes up after losing its data and needs to
# decide whether it should create a new cluster or try to join an existing
# one. If it creates a new cluster when it should have joined an existing
# one, we'd end up with two separate clusters listening at the same service
# endpoint, which would be very bad.
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
clusterIP: None
selector:
app: cockroachdb