Merge pull request #1209 from FrenchBen/clean-cloud

Removed extra cloud make
This commit is contained in:
Justin Cormack 2017-03-02 17:12:31 -08:00 committed by GitHub
commit 8bb23d79ac
28 changed files with 2 additions and 1286 deletions

View File

@ -1,4 +1,4 @@
.PHONY: test hyperkit-test qemu qemu-iso qemu-gce media ebpf ci ci-pr get get-regextract
.PHONY: test hyperkit-test qemu qemu-iso media ebpf ci ci-pr get get-regextract
all:
$(MAKE) -C alpine
@ -26,10 +26,6 @@ alpine/mobylinux-bios.iso:
alpine/mobylinux-efi.iso:
$(MAKE) -C alpine mobylinux-efi.iso
.PHONY: alpine/gce.img.tar.gz
alpine/gce.img.tar.gz:
$(MAKE) -C alpine gce.img.tar.gz
QEMU_IMAGE=mobylinux/qemu:0fb8c648e8ed9ef6b1ec449587aeab6c53872744@sha256:606f30d815102e73bc01c07915dc0d5f153b0252c63f5f0ed1e39621ec656eb5
# interactive versions need to use volume mounts
@ -39,9 +35,6 @@ qemu: alpine/initrd.img kernel/x86_64/vmlinuz64
qemu-iso: alpine/mobylinux-bios.iso
docker run -it --rm -v $(CURDIR)/alpine/mobylinux-bios.iso:/tmp/mobylinux-bios.iso $(QEMU_IMAGE)
qemu-gce: alpine/gce.img.tar.gz
docker run -it --rm -v $(CURDIR)/alpine/gce.img.tar.gz:/tmp/gce.img.tar.gz $(QEMU_IMAGE)
bin:
mkdir -p $@

View File

@ -56,8 +56,4 @@ RUN \
rc-update add oom default && \
rc-update add test default && \
rc-update add containerd default && \
rc-update add aws default && \
rc-update add azure default && \
rc-update add gcp-hostname default && \
rc-update add gcp-startup default && \
true

View File

@ -21,8 +21,6 @@ TAR2INITRD_IMAGE=mobylinux/tar2initrd:d5711601eb5b89de0f052d87365e18388ff3f1b5@s
TARTAR2INITRD_IMAGE=mobylinux/tartar2initrd:d56cde1558e3080e59a32e3cd7c7141baa601811@sha256:e1ad4522ff906d339da5f250b9ef6bffa5a70b4dec7d2cf7f7dbd0447b79352f
GCE_IMAGE=mobylinux/mkimage-gce:83158ebe303a970a3131c3d02ef2ba00ca5e4a14@sha256:51c30fbf946ca9967688a411e7bf69769a22987cd47ad03428491dd1556fcf63
MKIMAGE_BASE=mobylinux/mkimage-base:870f7512498f2ce5feccebe15fb0d03c5c3ebac2@sha256:47d1ed872b6a44f13b61ea80b3eeab4519dc151c7d684a89a53aa26233b4e087
moby.img: Dockerfile etc usr init
@ -48,9 +46,6 @@ moby.img: Dockerfile etc usr init
-C packages/test etc -C ../.. \
-C packages/iptables usr -C ../.. \
-C packages/containerd etc -C ../.. \
-C packages/aws etc -C ../.. \
-C packages/azure etc -C ../.. \
-C packages/gcp etc -C ../.. \
| \
docker build -q - ) && [ -n "$$BUILD" ] && echo "Built $$BUILD" && \
echo $$BUILD > mobylinux.tag && \
@ -81,126 +76,8 @@ mobylinux-efi.iso: initrd.img ../kernel/x86_64/vmlinuz64
mobylinux-bios.iso: initrd.img ../kernel/x86_64/vmlinuz64
tar cf - initrd.img -C ../kernel/x86_64 vmlinuz64 | docker run --rm --net=none --log-driver=none -i $(BIOS_IMAGE) >$@
gce: gce.img.tar.gz
gce.img.tar.gz: initrd.img ../kernel/x86_64/vmlinuz64
tar cf - initrd.img -C ../kernel/x86_64 vmlinuz64 | docker run --rm --net=none --log-driver=none -i $(GCE_IMAGE) >$@
gce-upload:
ifeq ($(FORCE_GSUTIL_AUTH),1)
-docker rm gsutil-moby-config 2>/dev/null
docker run -it --name gsutil-moby-config google/cloud-sdk gcloud auth login --brief --project=docker-for-gcp
endif
docker run --rm -ti \
--volumes-from gsutil-moby-config \
-v $(CURDIR)/gce.img.tar.gz:/gce.img.tar.gz \
google/cloud-sdk \
gsutil cp -a public-read /gce.img.tar.gz gs://docker-for-gcp-images/latest/gce.img.tar.gz
ami: initrd.img ../kernel/x86_64/vmlinuz64
tar cf - \
cloud initrd.img -C .. kernel/x86_64/vmlinuz64 \
| \
docker build -t moby-ami:build -f cloud/Dockerfile.ami -
# The EBS device seems not to show up without mounting in /dev, even
# with --privileged enabled.
docker run \
--rm \
--privileged \
-v /dev:/dev \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_ACCESS_KEY_ID \
-e TAG_KEY \
-e TAG_KEY_PREV \
-e CHANNEL \
-e MOBY_SRC_ROOT \
-e DOCKER_BIN_URL \
moby-ami:build clean
docker run \
--rm \
--privileged \
-v /dev:/dev \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_ACCESS_KEY_ID \
-e TAG_KEY \
-e TAG_KEY_PREV \
-e CHANNEL \
-e MOBY_SRC_ROOT \
-e DOCKER_BIN_URL \
moby-ami:build bake >./cloud/aws/ami_id.out
ami-clean-mount:
docker run \
--rm \
--privileged \
-v /dev:/dev \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_ACCESS_KEY_ID \
-e TAG_KEY \
-e TAG_KEY_PREV \
-e CHANNEL \
-e MOBY_SRC_ROOT \
-e DOCKER_BIN_URL \
moby-ami:build clean-mount
# TODO(nathanleclaire): Migrate this to docker/editions repo.
uploadvhd: azure
docker run \
-i \
-e VHD_SIZE \
-e AZURE_STG_ACCOUNT_KEY \
-e AZURE_STG_ACCOUNT_NAME \
-e CONTAINER_NAME \
--log-driver none \
--rm \
-v vhdartifact:/tmp \
moby-azure:build \
uploadvhd >./cloud/azure/vhd_blob_url.out
azure: initrd.img ../kernel/x86_64/vmlinuz64 vhdartifact
tar cf - \
cloud initrd.img -C .. kernel/x86_64/vmlinuz64 \
| \
docker build -t moby-azure:build -f cloud/Dockerfile.azure -
tar cf - \
cloud \
| \
docker build -t moby-azure:raw2vhd -f cloud/Dockerfile.raw2vhd -
# -v /dev:/dev needed in addition to --privileged due to creation of
# loopback device (mount namespace?)
docker run \
--rm \
--privileged \
--log-driver none \
-v vhdartifact:/tmp \
-v /dev:/dev \
moby-azure:build \
makeraw
docker run \
--rm \
--log-driver none \
-v vhdartifact:/tmp \
moby-azure:raw2vhd
docker run \
--rm \
-i \
--log-driver none \
-v vhdartifact:/tmp \
moby-azure:build \
tarout \
| tar -xvf -
vhdartifact:
# NB: Multiple 'docker volume create' with same name does not return
# non-zero even though maybe it should. The '|| true' is included as
# future insurance.
docker volume create --name vhdartifact || true
clean:
rm -f *.img *.vhd *.iso *.tag mobylinux.efi etc/moby-commit gce.img.tar.gz
docker images -q moby-azure:build | xargs docker rmi -f || true
docker images -q moby-azure:raw2vhd | xargs docker rmi -f || true
docker volume rm vhdartifact || true
rm -f *.img *.vhd *.iso *.tag mobylinux.efi etc/moby-commit
$(MAKE) -C packages clean
$(MAKE) -C containers clean
$(MAKE) -C test clean

View File

@ -1,14 +0,0 @@
FROM mobylinux/alpine-aws:da39a3ee5e6b4b0d3255bfef95601890afd80709@sha256:70799a47473700b5000557f07c0c2b6d293b017a5388c21dd5abe94f6b878632
RUN mkdir /build
RUN mkdir /scripts
WORKDIR /scripts
COPY ./kernel/x86_64/vmlinuz64 /build
COPY ./initrd.img /build
COPY ./cloud/aws/syslinux.cfg /build/syslinux.cfg
COPY ./cloud/build-common.sh .
COPY ./cloud/aws/common.sh .
COPY ./cloud/aws/aws.sh .
COPY ./cloud/aws/bake-ami.sh .
ENTRYPOINT ["./aws.sh"]

View File

@ -1,14 +0,0 @@
FROM docker4x/azure-vhd-utils@sha256:2fd21df46e65d2f4007133e664a0a81611d8d23f7badedce56ca8d2f9ca39f94
RUN mkdir /build
RUN mkdir /scripts
WORKDIR /scripts
COPY ./kernel/x86_64/vmlinuz64 /build
COPY ./initrd.img /build
COPY ./cloud/azure/syslinux.cfg /build/syslinux.cfg
COPY ./cloud/build-common.sh .
COPY ./cloud/azure/bake-azure.sh .
COPY ./cloud/azure/azure.sh .
VOLUME ["/tmp"]
ENTRYPOINT ["./azure.sh"]

View File

@ -1,12 +0,0 @@
FROM golang:alpine
RUN apk add --update \
curl \
e2fsprogs \
syslinux \
multipath-tools \
git \
tar \
util-linux
RUN go get -u github.com/Microsoft/azure-vhd-utils

View File

@ -1,6 +0,0 @@
FROM mobylinux/alpine-qemu:97cc67f5569c437175f2e54b3c3b9a96a8615a16@sha256:80e17a465b332d774fd91b53c0bcb18ed0ea8a77c17bf8d8451c57a8ab8b4e66
COPY ./cloud/azure/raw2vhd.sh /raw2vhd.sh
VOLUME ["/tmp"]
ENTRYPOINT ["/raw2vhd.sh"]

View File

@ -1,18 +0,0 @@
# Build Azure VHD
To create the Azure VHD, the following will be needed:
* An Azure account
* A Standard Storage account
* A container (bucket) in the above storage account (private)
* The access key associated with the above storage account
* (optional) The URL of the Docker version you want to use in the VHD
In your terminal, with Docker installed, run the following:
```
export AZURE_STG_ACCOUNT_NAME="<your-storage-account>"
export AZURE_STG_ACCOUNT_KEY="<your-access-key>"
export CONTAINER_NAME="<a-bucket-name>"
make uploadvhd DOCKER_BIN_URL="<tgz-docker-url>"
```
The above will output a URL which you can then use to deploy on Docker Editions.
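The `uploadvhd` target also writes the same URL to `cloud/azure/vhd_blob_url.out` (relative to the directory where `make` was run), so it can be read back later, for example:
```
cat cloud/azure/vhd_blob_url.out
```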

View File

@ -1 +0,0 @@
*.out

View File

@ -1,70 +0,0 @@
# Compile Moby for AWS (Amazon Machine Image)
#### Requirements
To compile, the requirements are:
1. Must be working on an EC2 instance
2. Must have `docker` and `docker-compose` installed
3. Must have configured Amazon credentials on the instance (`aws configure`); the build will mount `~/.aws` into the build container.
#### Building
To bake the AMI, run the following from inside the `alpine/` subdirectory of the main Moby repo:
```console
$ make ami
```
This will:
1. Clean up any remaining artifacts of old AMI builds
2. Create a new EBS volume and attach it to the build instance
3. Format and partition the volume for installing Linux
4. Set up the boot artifacts (`initrd.img` and `vmlinuz64`) inside the new partition
5. Install an MBR so that syslinux boots from the device
6. Take a snapshot of the EBS volume with Moby installed
7. Turn the snapshot into an AMI
#### Testing
Once the AMI has been created, a file `aws/ami_id.out` will be written containing its ID.
You can boot a small AWS instance from this AMI using the `aws/run-instance.sh`
script.
There is no SSH available today, but inbound access to the Docker API should
work if you configure a proper security group and attach it to the instance; a
sketch of doing so with the AWS CLI is shown below.
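A rough sketch of what that looks like with the AWS CLI, mirroring what `aws/run-instance.sh` does; the security group ID, instance ID, and CIDR below are placeholders, not values from this repo:
```console
$ # Placeholders only: substitute your own security group ID, VPC CIDR, and instance ID.
$ aws ec2 authorize-security-group-ingress \
    --group-id sg-0123456789abcdef0 \
    --protocol tcp \
    --port 2375 \
    --cidr 172.31.0.0/16
$ aws ec2 modify-instance-attribute \
    --instance-id i-0123456789abcdef0 \
    --groups sg-0123456789abcdef0
```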
With inbound access allowed on `:2375`, a command such as the following run from
your build instance should get you a "root-like" shell:
```console
$ docker -H 172.31.2.176:2375 \
run -ti \
--privileged \
--pid host \
debian \
nsenter -t 1 -m
```
Alternatively, you can have the `aws/run-instance.sh` script create a
security group and Swarm for you automatically (including worker/agent
instances to join the cluster).
To do so, set the `JOIN_INSTANCES` environment variable to any value, and
specify how many "joiners" (worker nodes) you want to also spin up using the
`JOINERS_COUNT` environment variable (the default is 1). e.g.:
```console
$ JOIN_INSTANCES=1 JOINERS_COUNT=3 ./aws/run-instance.sh
```
This will give you a 4-node cluster with a manager named
`docker-swarm-manager`, and workers named `docker-swarm-joiner-0`,
`docker-swarm-joiner-1`, and so on.
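Once the manager is running, you can sanity-check the cluster over the open Docker API port. This is only a hedged sketch: the manager IP is the placeholder used earlier in this document, and the exact swarm subcommands available depend on the Docker version baked into the image.
```console
$ # Confirm the daemon is reachable and that swarm mode is active (IP is a placeholder).
$ docker -H 172.31.2.176:2375 info
$ docker -H 172.31.2.176:2375 node ls
```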

View File

@ -1,52 +0,0 @@
#!/bin/sh
# Usage: ./aws/approve-account.sh [ACCOUNT_ID] [AMI_FILE]
#
# ACCOUNT_ID must be a valid AWS account ID
#
# AMI_FILE must be a newline-delimited file containing the AMI IDs to approve
# launch permissions for the given account and their region, e.g.:
#
# ami-xxxxxxx us-west-1
# ami-yyyyyyy us-east-1
set -e
. "cloud/build-common.sh"
. "cloud/aws/common.sh"
USER_ID="$1"
if [ ${#USER_ID} -lt 12 ]
then
# Pad zeros in front so it will always be 12 chars long, e.g., some AWS
# accounts have IDs like '123123123123' and others like '000123123123'
USER_ID_PADDED=$(printf "%0$((12-${#USER_ID}))d%s" 0 ${USER_ID})
else
USER_ID_PADDED="${USER_ID}"
fi
AMI_FILE="$2"
if [ ! -f ${AMI_FILE} ]
then
errecho "AMI file not found."
exit 1
fi
while read REGION_AMI_ID
do
REGION=$(echo ${REGION_AMI_ID} | cut -d' ' -f 1)
IMAGE_ID=$(echo ${REGION_AMI_ID} | cut -d' ' -f 2)
arrowecho "Approving launch for ${IMAGE_ID} in ${REGION}"
aws ec2 modify-image-attribute \
--region ${REGION} \
--image-id ${IMAGE_ID} \
--launch-permission "{
\"Add\": [{
\"UserId\": \"${USER_ID_PADDED}\"
}]
}"
done <${AMI_FILE}
arrowecho "Done approving account ${USER_ID_PADDED}"

View File

@ -1,7 +0,0 @@
#!/bin/sh
./bake-ami.sh "$@" 1>&2
if [ "$1" = "bake" ]
then
cat /build/ami_id.out
fi

View File

@ -1,152 +0,0 @@
#!/bin/sh
# Script to automate creation and snapshotting of a Moby AMI. Currently, it's
# intended to be invoked from an instance running in the same region as the
# target AMI will be in, since it directly mounts the created EBS volume as a
# device on this running instance.
set -e
PROVIDER="aws"
. "./build-common.sh"
. "./common.sh"
export AWS_DEFAULT_REGION=$(current_instance_region)
# TODO(nathanleclaire): This device could be calculated dynamically to avoid conflicts.
EBS_DEVICE=/dev/xvdb
CHANNEL=${CHANNEL:-editions}
DAY=$(date +"%m_%d_%Y")
bake_image()
{
# Create a new EBS volume. We will format this volume to boot into Moby
# initrd via syslinux in MBR. That formatted drive can then be snapshotted
# and turned into an AMI.
VOLUME_ID=$(aws ec2 create-volume \
--size 1 \
--availability-zone $(current_instance_az) | jq -r .VolumeId)
tag ${VOLUME_ID} ${DAY} ${CHANNEL}
aws ec2 wait volume-available --volume-ids ${VOLUME_ID}
arrowecho "Attaching volume"
aws ec2 attach-volume \
--volume-id ${VOLUME_ID} \
--device ${EBS_DEVICE} \
--instance-id $(current_instance_id) >/dev/null
aws ec2 wait volume-in-use --volume-ids ${VOLUME_ID}
format_on_device "${EBS_DEVICE}"
configure_syslinux_on_device_partition "${EBS_DEVICE}" "${EBS_DEVICE}1"
arrowecho "Taking snapshot!"
# Take a snapshot of the volume we wrote to.
SNAPSHOT_ID=$(aws ec2 create-snapshot \
--volume-id ${VOLUME_ID} \
--description "Snapshot of Moby device for AMI baking" | jq -r .SnapshotId)
tag ${SNAPSHOT_ID} ${DAY} ${CHANNEL}
arrowecho "Waiting for snapshot completion"
aws ec2 wait snapshot-completed --snapshot-ids ${SNAPSHOT_ID}
# Convert that snapshot into an AMI as the root device.
IMAGE_ID=$(aws ec2 register-image \
--name "${IMAGE_NAME}" \
--description "${IMAGE_DESCRIPTION}" \
--architecture x86_64 \
--root-device-name "${EBS_DEVICE}" \
--virtualization-type "hvm" \
--block-device-mappings "[
{
\"DeviceName\": \"${EBS_DEVICE}\",
\"Ebs\": {
\"SnapshotId\": \"${SNAPSHOT_ID}\"
}
}
]" | jq -r .ImageId)
tag ${IMAGE_ID} ${DAY} ${CHANNEL}
# Boom, now you (should) have a Moby AMI.
arrowecho "Created AMI: ${IMAGE_ID}"
echo "${IMAGE_ID}" >"${MOBY_SRC_ROOT}/ami_id.out"
}
clean_volume_mount()
{
VOLUME_ID=$(aws ec2 describe-volumes --filters "Name=tag-key,Values=$1" | jq -r .Volumes[0].VolumeId)
if [ ${VOLUME_ID} = "null" ]
then
arrowecho "No volume found, skipping"
else
arrowecho "Detaching volume"
aws ec2 detach-volume --volume-id ${VOLUME_ID} >/dev/null || errecho "WARN: Error detaching volume!"
aws ec2 wait volume-available --volume-ids ${VOLUME_ID}
arrowecho "Deleting volume"
aws ec2 delete-volume --volume-id ${VOLUME_ID} >/dev/null
fi
}
clean_tagged_resources()
{
if [ -d "${MOBY_SRC_ROOT}/moby" ]
then
rm -rf "${MOBY_SRC_ROOT}/moby"
fi
clean_volume_mount $1
IMAGE_ID=$(aws ec2 describe-images --filters "Name=tag-key,Values=$1" | jq -r .Images[0].ImageId)
if [ ${IMAGE_ID} = "null" ]
then
arrowecho "No image found, skipping"
else
arrowecho "Deregistering previously baked AMI"
# Sometimes describe-images does not return null even if the found
# image cannot be deregistered
#
# TODO(nathanleclaire): More elegant solution?
aws ec2 deregister-image --image-id ${IMAGE_ID} >/dev/null || errecho "WARN: Issue deregistering previously tagged image!"
fi
SNAPSHOT_ID=$(aws ec2 describe-snapshots --filters "Name=tag-key,Values=$1" | jq -r .Snapshots[0].SnapshotId)
if [ ${SNAPSHOT_ID} = "null" ]
then
arrowecho "No snapshot found, skipping"
else
arrowecho "Deleting volume snapshot"
aws ec2 delete-snapshot --snapshot-id ${SNAPSHOT_ID}
fi
}
if [ -z "${AWS_ACCESS_KEY_ID}" ] || [ -z "${AWS_SECRET_ACCESS_KEY}" ]
then
errecho "Must set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to authenticate with AWS."
exit 1
fi
case "$1" in
bake)
bake_image
;;
clean)
arrowecho "Cleaning resources from previous build tag (${TAG_KEY_PREV}) if applicable..."
clean_tagged_resources "${TAG_KEY_PREV}"
arrowecho "Cleaning resources from current build tag (${TAG_KEY}) if applicable..."
clean_tagged_resources "${TAG_KEY}"
;;
clean-mount)
clean_volume_mount "${TAG_KEY}"
;;
*)
errecho "Command $1 not found. Usage: ./bake-ami.sh [bake|clean|clean-mount]"
esac

View File

@ -1,36 +0,0 @@
#!/bin/sh
set -e
TAG_KEY=${TAG_KEY:-editionsdev}
TAG_KEY_PREV=${TAG_KEY_PREV:-editionsdev}
INSTANCE_ENDPOINT="http://169.254.169.254/latest"
INSTANCE_METADATA_API_ENDPOINT="${INSTANCE_ENDPOINT}/meta-data/"
IMAGE_NAME=${IMAGE_NAME:-"Moby Linux ${TAG_KEY}"}
IMAGE_DESCRIPTION=${IMAGE_DESCRIPTION:-"The best OS for running Docker, version ${TAG_KEY}"}
current_instance_region()
{
curl -s "${INSTANCE_ENDPOINT}/dynamic/instance-identity/document" | jq .region -r
}
current_instance_az()
{
curl -s "${INSTANCE_METADATA_API_ENDPOINT}/placement/availability-zone"
}
current_instance_id()
{
curl -s "${INSTANCE_METADATA_API_ENDPOINT}/instance-id"
}
# We tag resources created as part of the build to ensure that they can be
# cleaned up later.
tag()
{
arrowecho "Tagging $1 with ${TAG_KEY}, $2, and $3"
aws ec2 create-tags --resources "$1" --tags "Key=${TAG_KEY},Value=" >/dev/null
aws ec2 create-tags --resources "$1" --tags "Key=date,Value=$2" >/dev/null
aws ec2 create-tags --resources "$1" --tags "Key=channel,Value=$3" >/dev/null
}

View File

@ -1,89 +0,0 @@
#!/bin/sh
# Usage: ./cloud/aws/copy-amis.sh
# Assumptions:
# - A finished Moby AMI ID has been deposited in ./cloud/aws/ami_id.out. (This is
# the behavior of the ./cloud/aws/bake-ami.sh script)
# - (recommended) IMAGE_NAME is set to a custom value (e.g., "Moby Linux")
# - (recommended) IMAGE_DESCRIPTION is set to a custom value (e.g., "1.12-0-tp1")
#
#
# Outputs:
# - A file of newline delimited AMI IDs representing the AMI for each region.
# - A file containing a subsection of a CloudFormation template outlining these AMIs (JSON).
set -e
. "cloud/build-common.sh"
. "cloud/aws/common.sh"
SOURCE_AMI_ID=$(cat ./cloud/aws/ami_id.out)
# To have a list of just the IDs (approve accounts later if desired)
AMIS_IDS_DEST="./cloud/aws/copied_image_regions_${SOURCE_AMI_ID}.out"
# File to drop the (mostly correct) CF template section in
CF_TEMPLATE="./cloud/aws/cf_image_regions_${SOURCE_AMI_ID}.out"
cfecho()
{
echo "$@" >>${CF_TEMPLATE}
}
cfprintf()
{
printf "$@" >>${CF_TEMPLATE}
}
if [ -f ${AMIS_IDS_DEST} ]
then
rm ${AMIS_IDS_DEST}
fi
if [ -f ${CF_TEMPLATE} ]
then
rm ${CF_TEMPLATE}
fi
cfecho '"AWSRegionArch2AMI": {'
REGIONS="us-west-1 us-west-2 us-east-1 eu-west-1 eu-central-1 ap-southeast-1 ap-northeast-1 ap-southeast-2 ap-northeast-2 sa-east-1"
# (last element of array to emit no comma for JSON)
LAST_REGION="${REGIONS##* }"
for REGION in ${REGIONS}
do
REGION_AMI_ID=$(aws ec2 copy-image \
--source-region $(current_instance_region) \
--source-image-id "${SOURCE_AMI_ID}" \
--region "${REGION}" \
--name "${IMAGE_NAME}" \
--description "${IMAGE_DESCRIPTION}" | jq -r .ImageId)
echo "${REGION_AMI_ID}"
echo "${REGION} ${REGION_AMI_ID}" >>${AMIS_IDS_DEST}
cfprintf " \"${REGION}\": {
\"HVM64\": \"${REGION_AMI_ID}\",
\"HVMG2\": \"NOT_SUPPORTED\"
}"
# TODO: Not amazing way to determine last element.
if [ ${REGION} != "${LAST_REGION}" ]
then
cfecho ","
else
cfecho
fi
done
cfecho "}"
echo "All done. The results for adding to CloudFormation can be"
echo "viewed here:"
arrowecho ${CF_TEMPLATE}
echo
echo "The plain list of AMIs can be viewed here:"
arrowecho ${AMIS_IDS_DEST}

View File

@ -1,9 +0,0 @@
#!/bin/sh
logcmd()
{
"$@" 2>&1 | awk -v timestamp="$(date) " '$0=timestamp$0' >>/var/log/docker-swarm.log
}
logcmd docker swarm join {{MANAGER_IP}}:4500
logcmd docker swarm info

View File

@ -1,15 +0,0 @@
#!/bin/sh
METADATA=http://169.254.169.254/latest/meta-data
# TODO: This dial retry loop should be handled by openrc maybe? (or by docker
# service)
docker swarm init \
--secret "" \
--auto-accept manager \
--auto-accept worker \
--listen-addr $(wget -qO- ${METADATA}/local-ipv4 | sed 's/http:\/\///'):4500 \
>>/var/log/docker-swarm.log 2>&1
exit 0
exit 1

View File

@ -1,138 +0,0 @@
#!/bin/sh
# Quick script to boot an instance from generated AMI. Intended to be invoked
# from "alpine" directory.
set -e
JOINERS_COUNT=${JOINERS_COUNT:-1}
METADATA="http://169.254.169.254/latest/meta-data"
MANAGER_SG="docker-swarm-ingress"
manager_sg_id()
{
aws ec2 describe-security-groups \
--filter Name=group-name,Values=${MANAGER_SG} | jq -r .SecurityGroups[0].GroupId
}
attach_security_group()
{
MANAGER_SG_ID=$(manager_sg_id)
if [ ${MANAGER_SG_ID} = "null" ]
then
CUR_INSTANCE_MAC=$(wget -qO- ${METADATA}/network/interfaces/macs)
CUR_INSTANCE_VPC_CIDR=$(wget -qO- ${METADATA}/network/interfaces/macs/${CUR_INSTANCE_MAC}vpc-ipv4-cidr-block)
MANAGER_SG_ID=$(aws ec2 create-security-group \
--group-name ${MANAGER_SG} \
--description "Allow inbound access to Docker API and for remote join node connection" | jq -r .GroupId)
echo "Created security group ${MANAGER_SG_ID}"
# Hack to wait for SG to be created before adding rules
sleep 5
# For Docker API
aws ec2 authorize-security-group-ingress \
--group-id ${MANAGER_SG_ID} \
--protocol tcp \
--port 2375 \
--cidr ${CUR_INSTANCE_VPC_CIDR}
# For Swarm join node connection
aws ec2 authorize-security-group-ingress \
--group-id ${MANAGER_SG_ID} \
--protocol tcp \
--port 4500 \
--cidr ${CUR_INSTANCE_VPC_CIDR}
fi
aws ec2 modify-instance-attribute \
--instance-id "$1" \
--groups ${MANAGER_SG_ID}
}
poll_instance_log()
{
echo "Waiting for instance boot log to become available"
INSTANCE_BOOT_LOG="null"
while [ ${INSTANCE_BOOT_LOG} = "null" ]
do
INSTANCE_BOOT_LOG=$(aws ec2 get-console-output --instance-id "$1" | jq -r .Output)
sleep 5
done
aws ec2 get-console-output --instance-id "$1" | jq -r .Output
}
OLD_INSTANCE_IDS=$(cat ./cloud/aws/instance_id.out | tr '\n' ' ')
aws ec2 terminate-instances --instance-id ${OLD_INSTANCE_IDS} || true
if [ ! -f ./cloud/aws/ami_id.out ]
then
echo "AMI ID to launch instance from not found"
exit 1
fi
AMI_ID=$(cat ./cloud/aws/ami_id.out)
echo "Using image ${AMI_ID}"
MANAGER_INSTANCE_ID=$(aws ec2 run-instances \
--image-id ${AMI_ID} \
--instance-type t2.micro \
--user-data file://./cloud/aws/manager-user-data.sh | jq -r .Instances[0].InstanceId)
aws ec2 create-tags --resources ${MANAGER_INSTANCE_ID} --tags Key=Name,Value=$(whoami)-docker-swarm-manager
echo "Running manager instance ${MANAGER_INSTANCE_ID}"
# Deliberately truncate file here.
echo ${MANAGER_INSTANCE_ID} >./cloud/aws/instance_id.out
attach_security_group ${MANAGER_INSTANCE_ID}
# User can set this variable to indicate they want a whole swarm.
if [ ! -z "$JOIN_INSTANCES" ]
then
MANAGER_IP=$(aws ec2 describe-instances \
--instance-id ${MANAGER_INSTANCE_ID} | jq -r .Reservations[0].Instances[0].NetworkInterfaces[0].PrivateIpAddresses[0].PrivateIpAddress)
TMP_JOINER_USERDATA=/tmp/joiner-user-data-${MANAGER_INSTANCE_ID}.sh
cat ./cloud/aws/joiner-user-data.sh | sed "s/{{MANAGER_IP}}/${MANAGER_IP}/" >${TMP_JOINER_USERDATA}
JOINER_INSTANCE_IDS=$(aws ec2 run-instances \
--image-id ${AMI_ID} \
--instance-type t2.micro \
--count ${JOINERS_COUNT} \
--user-data file://${TMP_JOINER_USERDATA} | jq -r .Instances[].InstanceId)
echo "Joining nodes:" ${JOINER_INSTANCE_IDS}
NODE_NUMBER=0
for ID in ${JOINER_INSTANCE_IDS}
do
echo "Tagging joiner instance #${NODE_NUMBER}: ${ID}"
# For debugging purposes only. In "production" this SG should not be
# attached to these instances.
attach_security_group ${ID}
# Do not truncate file here.
echo ${ID} >>./cloud/aws/instance_id.out
# TODO: Get list of ids and do this for each if applicable.
aws ec2 create-tags --resources ${ID} --tags Key=Name,Value=$(whoami)-docker-swarm-joiner-${NODE_NUMBER}
NODE_NUMBER=$((NODE_NUMBER+1))
done
exit
fi
echo "Waiting for manager to be running..."
aws ec2 wait instance-running --instance-ids $(cat ./cloud/aws/instance_id.out | tr '\n' ' ')
poll_instance_log ${MANAGER_INSTANCE_ID}

View File

@ -1,7 +0,0 @@
DEFAULT linux
TIMEOUT 0
PROMPT 0
LABEL linux
KERNEL /vmlinuz64
INITRD /initrd.img
APPEND root=/dev/xvdb1 console=tty0 console=tty1 console=ttyS0 mobyplatform=aws vsyscall=emulate page_poison=1

View File

@ -1,12 +0,0 @@
#!/bin/sh
if [ "$1" = "tarout" ]
then
tar --directory /tmp -cf - -S mobylinux.vhd
else
./bake-azure.sh "$@" 1>&2
if [ "$1" = "uploadvhd" ]
then
cat vhd_blob_url.out
fi
fi

View File

@ -1,96 +0,0 @@
#!/bin/sh
# Script to automate the creation of a VHD for Moby in Azure, and upload it to
# an Azure storage account (needed in order to launch it on Azure, or upload it
# to the Azure Marketplace).
#
# Usage: ./bake-azure.sh (intended to be invoked in a Docker container with
# specific properties, see the 'alpine' dir / Makefile)
#
# Parameters (override as environment variables):
#
# AZURE_STG_ACCOUNT_NAME: Name of the storage account to upload the VHD to.
#
# AZURE_STG_ACCOUNT_KEY: Key needed to access the storage account to upload the
# VHD. This can be accessed in the storage account in the web portal.
#
# CONTAINER_NAME: Name of the container in the storage account to place the
# created VHD in. "Container" here is NOT a Docker/Linux container, it is
# similar to "bucket" in AWS parlance.
#
# BLOBNAME: Name of the created VHD "blob". e.g., "foobar-mobylinux.vhd"
set -e
PROVIDER="azure"
. "build-common.sh"
case "$1" in
makeraw)
RAW_IMAGE="/tmp/mobylinux.img"
if [ -f "${RAW_IMAGE}" ]
then
rm "${RAW_IMAGE}"
fi
VHD_SIZE=${VHD_SIZE:-"30G"}
arrowecho "Writing empty image file"
dd if=/dev/zero of="${RAW_IMAGE}" count=0 bs=1 seek="${VHD_SIZE}"
arrowecho "Formatting image file for boot"
format_on_device "${RAW_IMAGE}"
arrowecho "Setting up loopback device"
LOOPBACK_DEVICE="$(losetup -f --show ${RAW_IMAGE})"
arrowecho "Loopback device is ${LOOPBACK_DEVICE}"
arrowecho "Mapping partition"
MAPPED_PARTITION="/dev/mapper/$(kpartx -av ${LOOPBACK_DEVICE} | cut -d' ' -f3)"
arrowecho "Partition mapped at ${MAPPED_PARTITION}"
arrowecho "Installing syslinux and dropping artifacts on partition..."
configure_syslinux_on_device_partition "${LOOPBACK_DEVICE}" "${MAPPED_PARTITION}"
arrowecho "Cleaning up..."
kpartx -d "${LOOPBACK_DEVICE}"
losetup -d "${LOOPBACK_DEVICE}"
arrowecho "Cleanup done, outputting created image. This might take a while..."
arrowecho "Finished outputting raw image file to ${RAW_IMAGE}"
;;
uploadvhd)
if [ -z "${AZURE_STG_ACCOUNT_KEY}" ]
then
errecho "Need to set AZURE_STG_ACCOUNT_KEY for the 'dockereditions' storage account."
exit 1
fi
AZURE_STG_ACCOUNT_NAME=${AZURE_STG_ACCOUNT_NAME:-"mobyvhd"}
CONTAINER_NAME=${CONTAINER_NAME:-"vhd"}
BLOBNAME=${BLOBNAME:-$(head -c 200m "/tmp/mobylinux.vhd" | md5sum | awk '{ print $1; }')-mobylinux.vhd}
BLOB_URL="https://${AZURE_STG_ACCOUNT_NAME}.blob.core.windows.net/${CONTAINER_NAME}/${BLOBNAME}"
arrowecho "Uploading VHD to ${BLOBURL}..."
azure-vhd-utils upload \
--localvhdpath "/tmp/mobylinux.vhd" \
--stgaccountname "${AZURE_STG_ACCOUNT_NAME}" \
--stgaccountkey "${AZURE_STG_ACCOUNT_KEY}" \
--containername "${CONTAINER_NAME}" \
--blobname "${BLOBNAME}" \
--overwrite
arrowecho "VHD uploaded."
arrowecho "${BLOB_URL}"
echo "${BLOB_URL}" >vhd_blob_url.out
;;
*)
errecho "Invalid usage. Syntax: ./bake-azure.sh [makeraw|uploadvhd]"
exit 1
esac

View File

@ -1,7 +0,0 @@
#!/bin/sh
set -e
>&2 echo "Converting raw image file to VHD..."
qemu-img convert -f raw -O vpc -o subformat=fixed,force_size /tmp/mobylinux.img /tmp/mobylinux.vhd 1>&2
>&2 echo "Done converting to VHD."

View File

@ -1,5 +0,0 @@
DEFAULT linux
LABEL linux
KERNEL /vmlinuz64
INITRD /initrd.img
APPEND root=/dev/sda1 rootdelay=300 console=tty1 console=ttyS0 earlyprintk=ttyS0 mobyplatform=azure vsyscall=emulate page_poison=1

View File

@ -1,123 +0,0 @@
#!/bin/sh
set -e
MOBY_SRC_ROOT=${MOBY_SRC_ROOT:-/build}
arrowecho()
{
echo " --->" "$@"
}
errecho()
{
echo "$@" >&2
}
# $1 - the device to format (e.g. /dev/xvdb)
format_on_device()
{
while [ ! -e "$1" ]
do
sleep 0.1
done
arrowecho "Formatting boot partition"
# TODO (nathanleclaire): Any more readable or more elegant solution to
# account for this minor (specify 1st partition as bootable) difference
# between cloud builds?
if [ "${PROVIDER}" = "aws" ]
then
# This heredoc might be confusing at first glance, so here is a detailed
# summary of what each line does:
#
# n - create new partition
# p - make it a primary partition
# 1 - it should be partition #1
# \n - use default first cylinder
# \n - use default last cylinder
# a - toggle a partition as bootable
# 1 - first partition
# w - write changes and exit
fdisk "$1" << EOF
n
p
1
a
1
w
EOF
elif [ ${PROVIDER} = "azure" ]
then
fdisk "$1" << EOF
n
p
1
a
w
EOF
else
errecho "Provider not recognized: ${PROVIDER}"
exit 1
fi
# To ensure everything went smoothly, print the resulting partition table.
arrowecho "Printing device partition contents"
fdisk -l "$1"
}
# $1 - device
# $2 - partition 1 on device
configure_syslinux_on_device_partition()
{
# Mount created root partition, format it as ext4, and copy over the needed
# files for boot (syslinux configuration, kernel binary, and initrd.img)
while [ ! -e "$2" ]
do
sleep 0.1
done
arrowecho "Making filesystem on partition"
mke2fs -t ext4 "$2"
arrowecho "Mounting partition filesystem"
ROOT_PARTITION_MOUNT="${MOBY_SRC_ROOT}/moby"
if [ ! -d ${ROOT_PARTITION_MOUNT} ]
then
mkdir -p ${ROOT_PARTITION_MOUNT}
fi
mount -t ext4 "$2" ${ROOT_PARTITION_MOUNT}
arrowecho "Copying image and kernel binary to partition"
# Get files needed to boot in place.
cp ${MOBY_SRC_ROOT}/syslinux.cfg ${ROOT_PARTITION_MOUNT}
cat ${ROOT_PARTITION_MOUNT}/syslinux.cfg
cp ${MOBY_SRC_ROOT}/vmlinuz64 ${ROOT_PARTITION_MOUNT}
cp ${MOBY_SRC_ROOT}/initrd.img ${ROOT_PARTITION_MOUNT}
# From http://www.syslinux.org/wiki/index.php?title=EXTLINUX:
#
# "Note that EXTLINUX installs in the filesystem partition like a
# well-behaved bootloader :). Thus, it needs a master boot record in the
# partition table; the mbr.bin shipped with SYSLINUX should work well."
# Thus, this step installs syslinux on the mounted filesystem (partition
# 1).
arrowecho "Installing syslinux to partition"
extlinux --install ${ROOT_PARTITION_MOUNT}
# Format master boot record in partition table on target device.
arrowecho "Copying MBR to partition table in target device"
dd if=/usr/share/syslinux/mbr.bin of="$1" bs=440 count=1
umount ${ROOT_PARTITION_MOUNT}
arrowecho "Checking device/partition sanity"
fdisk -l "$1"
}

View File

@ -1,67 +0,0 @@
#!/sbin/openrc-run
description="Bootstrap procedure if running on Docker for AWS"
depend()
{
need docker
}
start()
{
[ "$(mobyplatform)" != "aws" ] && exit 0
ebegin "Running AWS-specific initialization"
INSTANCE_DATA_ENDPOINT=http://169.254.169.254/latest
METADATA="${INSTANCE_DATA_ENDPOINT}/meta-data"
USERDATA="${INSTANCE_DATA_ENDPOINT}/user-data"
USER_SSH_DIR=/home/docker/.ssh
# setup SSH keys
if [ ! -d ${USER_SSH_DIR} ]
then
mkdir -p ${USER_SSH_DIR}
chmod 700 ${USER_SSH_DIR}
fi
# Put instance SSH key in place.
wget -q -O /tmp/my-key ${METADATA}/public-keys/0/openssh-key &>/dev/null
if [ $? -eq 0 ]
then
cat /tmp/my-key >> ${USER_SSH_DIR}/authorized_keys
chmod 700 ${USER_SSH_DIR}/authorized_keys
rm /tmp/my-key
else
echo "No SSH public key found to add to instance"
fi
# TODO: The docker user should be given more permissions on the FS by
# default; this is a temporary hack
chown -R docker /home/docker
chgrp -R docker /home/docker
chown -R docker /var/log
chgrp -R docker /var/log
passwd -u docker
HOSTNAME=$(wget -qO- ${METADATA}/local-hostname)
# Set hostname based on what AWS tells us it should be.
echo ${HOSTNAME} >/etc/hostname
hostname -F /etc/hostname
# Needed for ELB integration.
mkdir -p /var/lib/docker/swarm
# Get user data file and use it to bootstrap Moby in the cloud
wget -q -O /tmp/user-data ${USERDATA}/
# For now we will have a shell script which executes on boot.
# TODO(nathanleclaire/kencochrane): Migrate this to mobyconfig, or similar.
if [ $? -eq 0 ]
then
sh /tmp/user-data
fi
eend 0
}

View File

@ -1,131 +0,0 @@
#!/sbin/openrc-run
description="Bootstrap procedure if running on Docker Azure edition"
depend()
{
need docker
need net
}
start()
{
[ "$(mobyplatform)" != "azure" ] && exit 0
ebegin "Running Azure-specific initialization"
for i in $(seq 1 20)
do
einfo "Pulling Windows Azure Linux Agent container"
docker pull docker4x/agent-azure >/dev/null
if [ $? -eq 0 ]
then
break
fi
# Wait for... network to come up? DNS servers to be reachable?
# Not certain, but Azure continually fails to achieve this pull so
# far because it can't dial the DNS lookup properly.
#
# TODO: Debug.
sleep 5
done
einfo "Running Windows Azure Linux Agent container"
export DOCKER_FOR_IAAS_VERSION="azure-v1.13.1-ga-2"
export DOCKER_FOR_IAAS_VERSION_DIGEST="badffbf8fff6fb1bdf05ec485b52ae15d80f6e23c6b3ba0e89abd80fb932bd84"
# "Fake" /etc/hostname setup for persistence across reboots
#
# Note the bind mount in 'docker run' below.
if [ ! -d /var/etc/ ]
then
mkdir -p /var/etc
fi
if [ ! -d /var/home/docker ]
then
mkdir -p /var/home/docker
chown -R docker:docker /var/home/docker
fi
if [ ! -f /var/etc/hostname ]
then
echo "moby" >/var/etc/hostname
fi
if [ -f /var/lib/waagent/provisioned ]
then
# During provisioning, the Azure agent usually does this, but
# on reboots, it will need to be invoked "manually".
hostname -F /var/etc/hostname
kill -HUP "$(pidof dhcpcd)"
fi
docker run -d \
--privileged \
--name agent \
--ipc host \
--pid host \
--net host \
--uts host \
--label com.docker.editions.system \
--restart unless-stopped \
-e DOCKER_FOR_IAAS_VERSION \
-v /usr/bin/docker:/usr/local/bin/docker:ro \
-v /mnt:/mnt \
-v /etc:/etc \
-v /var/etc/ssh:/etc/ssh \
-v /var/etc/hostname:/etc/hostname \
-v /var/home:/home \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/log:/var/log \
-v /lib/modules:/lib/modules \
-v /lib/firmware:/lib/firmware \
-v /var/lib/waagent:/var/lib/waagent \
"docker4x/agent-azure@sha256:$DOCKER_FOR_IAAS_VERSION_DIGEST"
# Wait for docker user to be added by agent.
while [ ! -d /home/docker ]
do
sleep 5
done
# TODO: Make this cleaner.
# User gets added by waagent.
# Need to unlock it to login via SSH.
passwd -u docker
checkpath --directory --mode 0700 /home/docker/.ssh
# Wait for WALinux agent provisioning to finish before invoking the
# passed custom data. This assures us that hostname etc. is set
# correctly before running, say, swarm commands.
while [ ! -f /var/lib/waagent/provisioned ]
do
sleep 5
done
cat <<EOF >/etc/docker/daemon.json
{
"log-driver": "syslog",
"log-opts": {
"syslog-address": "udp://localhost:514",
"tag": "{{.Name}}/{{.ID}}"
}
}
EOF
# Ensure correct hostname according to Azure and reload daemon config
service docker restart
. /var/lib/waagent/CustomData
eend 0
}
stop()
{
[ "$(mobyplatform)" != "azure" ] && exit 0
docker rm -f agent || true
passwd -l docker
}

View File

@ -1,29 +0,0 @@
#!/sbin/openrc-run
description="Set hostname on Docker GCP edition"
depend()
{
need net
before docker
}
metadata() {
curl -sH 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/${1}
}
start()
{
[ "$(mobyplatform)" != "gcp" ] && exit 0
ebegin "Set hostname based on what GCP tells us it should be"
IP=$(metadata instance/network-interfaces/0/ip)
HOSTNAME=$(metadata instance/hostname)
echo "${IP} ${HOSTNAME} ${HOSTNAME%%.*} # Added by Google" >> /etc/hosts
echo "169.254.169.254 metadata.google.internal # Added by Google" >> /etc/hosts
echo ${HOSTNAME%%.*} >/etc/hostname
hostname -F /etc/hostname
eend 0
}

View File

@ -1,40 +0,0 @@
#!/sbin/openrc-run
description="Run startup/shudown scripts on Docker GCP edition"
depend()
{
need net
}
metadata() {
curl -sfH 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/${1}
}
start()
{
[ "$(mobyplatform)" != "gcp" ] && exit 0
ebegin "Run startup script"
temp_file=$(mktemp)
metadata /instance/attributes/startup-script > ${temp_file}
[ -s "${temp_file}" ] && sh "${temp_file}"
rm -f "${temp_file}"
eend 0
}
stop()
{
[ "$(mobyplatform)" != "gcp" ] && exit 0
ebegin "Run shutdown script"
temp_file=$(mktemp)
metadata /instance/attributes/shutdown-script > ${temp_file}
[ -s "${temp_file}" ] && sh "${temp_file}"
rm -f "${temp_file}"
eend 0
}