diff --git a/cluster/kubemark/config-default.sh b/cluster/kubemark/config-default.sh
index 59142f86a37..559260ed5a0 100644
--- a/cluster/kubemark/config-default.sh
+++ b/cluster/kubemark/config-default.sh
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 # A configuration for Kubemark cluster. It doesn't need to be kept in
-# sync with gce/config-default.sh (except the filename, because I'm reusing 
+# sync with gce/config-default.sh (except the filename, because I'm reusing
 # gce/util.sh script which assumes config filename), but if some things that
 # are enabled by default should not run in hollow clusters, they should be disabled here.
 
diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go
index fef3f990983..06183fb68f4 100644
--- a/cmd/kubemark/hollow-node.go
+++ b/cmd/kubemark/hollow-node.go
@@ -26,8 +26,8 @@ import (
 	docker "github.com/fsouza/go-dockerclient"
 	kubeletapp "k8s.io/kubernetes/cmd/kubelet/app"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/latest"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/dockertools"
@@ -42,6 +42,7 @@ var (
 	fakeDockerClient dockertools.FakeDockerClient
 
 	apiServer           string
+	kubeconfigPath      string
 	kubeletPort         int
 	kubeletReadOnlyPort int
 	nodeName            string
@@ -50,6 +51,7 @@ var (
 
 func addFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&apiServer, "server", "", "API server IP.")
+	fs.StringVar(&kubeconfigPath, "kubeconfig", "/kubeconfig/kubeconfig", "Path to kubeconfig file.")
 	fs.IntVar(&kubeletPort, "kubelet-port", 10250, "Port on which HollowKubelet should be listening.")
 	fs.IntVar(&kubeletReadOnlyPort, "kubelet-read-only-port", 10255, "Read-only port on which Kubelet is listening.")
 	fs.StringVar(&nodeName, "name", "fake-node", "Name of this Hollow Node.")
@@ -70,13 +72,35 @@ func makeTempDirOrDie(prefix string, baseDir string) string {
 	return tempDir
 }
 
+func createClientFromFile(path string) (*client.Client, error) {
+	c, err := clientcmd.LoadFromFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", path, err)
+	}
+	config, err := clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error while creating kubeconfig: %v", err)
+	}
+	client, err := client.New(config)
+	if err != nil {
+		return nil, fmt.Errorf("error while creating client: %v", err)
+	}
+	if client.Timeout == 0 {
+		client.Timeout = 30 * time.Second
+	}
+	return client, nil
+}
+
 func main() {
 	runtime.GOMAXPROCS(runtime.NumCPU())
 	addFlags(pflag.CommandLine)
 	util.InitFlags()
 
 	// create a client for Kubelet to communicate with API server.
-	cl := client.NewOrDie(&client.Config{Host: fmt.Sprintf("http://%v:%v", apiServer, serverPort), Version: latest.GroupOrDie("").Version})
+	cl, err := createClientFromFile(kubeconfigPath)
+	if err != nil {
+		glog.Fatal("Failed to create a Client. Exiting.")
+	}
 	cadvisorInterface := new(cadvisor.Fake)
 	testRootDir := makeTempDirOrDie("hollow-kubelet.", "")
 
diff --git a/test/kubemark/configure-kubectl.sh b/test/kubemark/configure-kubectl.sh
index 210fd8bccbd..0adca7f3173 100644
--- a/test/kubemark/configure-kubectl.sh
+++ b/test/kubemark/configure-kubectl.sh
@@ -18,5 +18,5 @@ curl https://sdk.cloud.google.com | bash
 sudo gcloud components update kubectl -q
 sudo ln -s /usr/local/share/google/google-cloud-sdk/bin/kubectl /bin/
 kubectl config set-cluster hollow-cluster --server=http://localhost:8080 --insecure-skip-tls-verify=true
-kubectl config set-credentials `whoami`
-kubectl config set-context hollow-context --cluster=hollow-cluster --user=`whoami`
+kubectl config set-credentials $(whoami)
+kubectl config set-context hollow-context --cluster=hollow-cluster --user=$(whoami)
diff --git a/test/kubemark/hollow-kubelet_template.json b/test/kubemark/hollow-kubelet_template.json
index e6fee4bef9a..d9e5a8e9fe9 100644
--- a/test/kubemark/hollow-kubelet_template.json
+++ b/test/kubemark/hollow-kubelet_template.json
@@ -19,6 +19,14 @@
       }
     },
     "spec": {
+      "volumes": [
+        {
+          "name": "kubeconfig-volume",
+          "secret": {
+            "secretName": "kubeconfig"
+          }
+        }
+      ],
       "containers": [{
         "name": "hollow-kubelet",
         "image": "gcr.io/##project##/kubemark:latest",
@@ -41,6 +49,12 @@
           "--api-server-port=8080",
           "--v=3"
         ],
+        "volumeMounts": [
+          {
+            "name": "kubeconfig-volume",
+            "mountPath": "/kubeconfig"
+          }
+        ],
         "resources": {
           "requests": {
             "cpu": "50m",
diff --git a/test/kubemark/run-scalability-test.sh b/test/kubemark/run-scalability-test.sh
index b01b3dff209..b64b872c0bb 100755
--- a/test/kubemark/run-scalability-test.sh
+++ b/test/kubemark/run-scalability-test.sh
@@ -14,11 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-
 export KUBERNETES_PROVIDER="kubemark"
 export KUBE_CONFIG_FILE="config-default.sh"
 
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+
+# We need an absolute path to KUBE_ROOT
+ABSOLUTE_ROOT=$(readlink -f ${KUBE_ROOT})
+
 source ${KUBE_ROOT}/cluster/kubemark/util.sh
 source ${KUBE_ROOT}/cluster/kubemark/config-default.sh
 
@@ -27,6 +30,7 @@ echo ${MASTER_NAME}
 
 detect-master
 
-export KUBE_MASTER_URL="http://${KUBE_MASTER_IP:-}:8080"
+export KUBE_MASTER_URL="https://${KUBE_MASTER_IP}"
+export KUBECONFIG="${ABSOLUTE_ROOT}/test/kubemark/kubeconfig.loc"
 
 ${KUBE_ROOT}/hack/ginkgo-e2e.sh --e2e-verify-service-account=false --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode"
diff --git a/test/kubemark/start-kubemark-master.sh b/test/kubemark/start-kubemark-master.sh
index ba7de8c1a8f..e54da52ba08 100644
--- a/test/kubemark/start-kubemark-master.sh
+++ b/test/kubemark/start-kubemark-master.sh
@@ -22,10 +22,21 @@ ulimit -n 65536
 
 tar xzf kubernetes-server-linux-amd64.tar.gz
 
-kubernetes/server/bin/kube-controller-manager --master=127.0.0.1:8080 --v=2 &> /tmp/kube-controller-manager.log &
+kubernetes/server/bin/kube-controller-manager --master=127.0.0.1:8080 --service-account-private-key-file=/srv/kubernetes/server.key --root-ca-file=/srv/kubernetes/ca.crt --v=2 &> /tmp/kube-controller-manager.log &
 
 kubernetes/server/bin/kube-scheduler --master=127.0.0.1:8080 --v=2 &> /tmp/kube-scheduler.log &
 
-kubernetes/server/bin/kube-apiserver --portal-net=10.0.0.1/24 --address=0.0.0.0 --etcd-servers=http://127.0.0.1:4001 --cluster-name=hollow-kubernetes --v=2 &> /tmp/kube-apiserver.log &
+kubernetes/server/bin/kube-apiserver \
+  --portal-net=10.0.0.1/24 \
+  --address=0.0.0.0 \
+  --etcd-servers=http://127.0.0.1:4001 \
+  --cluster-name=hollow-kubernetes \
+  --v=4 \
+  --tls-cert-file=/srv/kubernetes/server.cert \
+  --tls-private-key-file=/srv/kubernetes/server.key \
+  --client-ca-file=/srv/kubernetes/ca.crt \
+  --token-auth-file=/srv/kubernetes/known_tokens.csv \
+  --secure-port=443 \
+  --basic-auth-file=/srv/kubernetes/basic_auth.csv &> /tmp/kube-apiserver.log &
 
 rm -rf kubernetes
diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh
index fdb0510a38a..3125f1dac04 100755
--- a/test/kubemark/start-kubemark.sh
+++ b/test/kubemark/start-kubemark.sh
@@ -63,12 +63,33 @@ gcloud compute instances create ${MASTER_NAME} \
   --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
 
 MASTER_IP=$(gcloud compute instances describe hollow-cluster-master \
-  --zone="${ZONE}" --project="${PROJECT}" | grep networkIP | cut -f2 -d":" | sed "s/ //g")
+  --zone="${ZONE}" --project="${PROJECT}" | grep natIP: | cut -f2 -d":" | sed "s/ //g")
+
+ensure-temp-dir
+gen-kube-bearertoken
+create-certs ${MASTER_IP}
+KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+
+echo "${CA_CERT_BASE64}" | base64 -d > ca.crt
+echo "${KUBECFG_CERT_BASE64}" | base64 -d > kubecfg.crt
+echo "${KUBECFG_KEY_BASE64}" | base64 -d > kubecfg.key
 
 until gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" hollow-cluster-master --command="ls" &> /dev/null; do
   sleep 1
 done
 
+gcloud compute ssh --zone=${ZONE} hollow-cluster-master --command="sudo mkdir /srv/kubernetes -p && \
+sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 -d > /srv/kubernetes/server.cert\" && \
+sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 -d > /srv/kubernetes/server.key\" && \
+sudo bash -c \"echo ${CA_CERT_BASE64} | base64 -d > /srv/kubernetes/ca.crt\" && \
+sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 -d > /srv/kubernetes/kubecfg.crt\" && \
+sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 -d > /srv/kubernetes/kubecfg.key\" && \
+sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /srv/kubernetes/known_tokens.csv\" && \
+sudo bash -c \"echo \"${KUBELET_TOKEN},kubelet,kubelet\" >> /srv/kubernetes/known_tokens.csv\" && \
+sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /srv/kubernetes/known_tokens.csv\" && \
+sudo bash -c \"echo admin,admin,admin > /srv/kubernetes/basic_auth.csv\""
+
 if [ "${RUN_FROM_DISTRO}" == "false" ]; then
   gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
     "${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" \
@@ -83,11 +104,73 @@ else
     "hollow-cluster-master":~
 fi
 
-gcloud compute ssh --zone=${ZONE} --project="${PROJECT}" hollow-cluster-master \
+gcloud compute ssh hollow-cluster-master --zone=${ZONE} --project="${PROJECT}" \
   --command="chmod a+x configure-kubectl.sh && chmod a+x start-kubemark-master.sh && sudo ./start-kubemark-master.sh"
 
+# create kubeconfig for Kubelet:
+KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
+kind: Config
+users:
+- name: kubelet
+  user:
+    client-certificate-data: ${KUBELET_CERT_BASE64}
+    client-key-data: ${KUBELET_KEY_BASE64}
+clusters:
+- name: kubemark
+  cluster:
+    certificate-authority-data: ${CA_CERT_BASE64}
+    server: https://${MASTER_IP}
+contexts:
+- context:
+    cluster: kubemark
+    user: kubelet
+  name: kubemark-context
+current-context: kubemark-context" | base64 | tr -d "\n\r")
+
+KUBECONFIG_SECRET=kubeconfig_secret.json
+cat > "${KUBECONFIG_SECRET}" << EOF
+{
+  "apiVersion": "v1",
+  "kind": "Secret",
+  "metadata": {
+    "name": "kubeconfig"
+  },
+  "type": "Opaque",
+  "data": {
+    "kubeconfig": "${KUBECONFIG_CONTENTS}"
+  }
+}
+EOF
+
+LOCAL_KUBECONFIG=${KUBE_ROOT}/test/kubemark/kubeconfig.loc
+cat > "${LOCAL_KUBECONFIG}" << EOF
+apiVersion: v1
+kind: Config
+users:
+- name: admin
+  user:
+    client-certificate-data: ${KUBECFG_CERT_BASE64}
+    client-key-data: ${KUBECFG_KEY_BASE64}
+    username: admin
+    password: admin
+clusters:
+- name: kubemark
+  cluster:
+    certificate-authority-data: ${CA_CERT_BASE64}
+    server: https://${MASTER_IP}
+contexts:
+- context:
+    cluster: kubemark
+    user: admin
+  name: kubemark-context
+current-context: kubemark-context
+EOF
+
 sed "s/##masterip##/\"${MASTER_IP}\"/g" ${KUBE_ROOT}/test/kubemark/hollow-kubelet_template.json > ${KUBE_ROOT}/test/kubemark/hollow-kubelet.json
 sed -i'' -e "s/##numreplicas##/${NUM_MINIONS:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-kubelet.json
 sed -i'' -e "s/##project##/${PROJECT}/g" ${KUBE_ROOT}/test/kubemark/hollow-kubelet.json
 
 kubectl create -f ${KUBE_ROOT}/test/kubemark/kubemark-ns.json
+kubectl create -f ${KUBECONFIG_SECRET} --namespace="kubemark"
 kubectl create -f ${KUBE_ROOT}/test/kubemark/hollow-kubelet.json --namespace="kubemark"
+
+rm ${KUBECONFIG_SECRET}
diff --git a/test/kubemark/stop-kubemark.sh b/test/kubemark/stop-kubemark.sh
index c0201b1bd6e..5c1f0ccb5d7 100755
--- a/test/kubemark/stop-kubemark.sh
+++ b/test/kubemark/stop-kubemark.sh
@@ -37,3 +37,5 @@ gcloud compute disks delete \
   --quiet \
   --zone "${ZONE}" \
   "${MASTER_NAME}"-pd || true
+
+rm -rf "kubeconfig.loc" &> /dev/null || true