mirror of https://github.com/rancher/os.git synced 2025-09-16 15:09:27 +00:00

Refactor to use libcompose

Darren Shepherd
2015-08-04 14:45:38 -07:00
parent 19f9a1b281
commit 9d76b79ac3
26 changed files with 665 additions and 1458 deletions


@@ -1,10 +1,6 @@
 package docker
 import (
-	"time"
-	log "github.com/Sirupsen/logrus"
 	dockerClient "github.com/fsouza/go-dockerclient"
 	"github.com/rancherio/os/config"
 )
@@ -28,25 +24,10 @@ func NewClient(endpoint string) (*dockerClient.Client, error) {
 		return nil, err
 	}
-	retry := false
-	for i := 0; i < (MAX_WAIT / INTERVAL); i++ {
-		_, err = client.Info()
-		if err == nil {
-			break
-		}
+	err = ClientOK(endpoint, func() bool {
+		_, err := client.Info()
+		return err == nil
+	})
-		retry = true
-		log.Infof("Waiting for Docker at %s", endpoint)
-		time.Sleep(INTERVAL * time.Millisecond)
-	}
-	if err != nil {
-		return nil, err
-	}
-	if retry {
-		log.Infof("Connected to Docker at %s", endpoint)
-	}
-	return client, nil
+	return client, err
 }

docker/client_factory.go (new file, 94 lines)

@@ -0,0 +1,94 @@
package docker
import (
"fmt"
"sync"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/docker/machine/log"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/samalba/dockerclient"
)
type ClientFactory struct {
userClient dockerclient.Client
systemClient dockerclient.Client
userOnce sync.Once
systemOnce sync.Once
}
func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) {
userOpts := opts
systemOpts := opts
userOpts.Host = config.DOCKER_HOST
systemOpts.Host = config.DOCKER_SYSTEM_HOST
userClient, err := docker.CreateClient(userOpts)
if err != nil {
return nil, err
}
systemClient, err := docker.CreateClient(systemOpts)
if err != nil {
return nil, err
}
return &ClientFactory{
userClient: userClient,
systemClient: systemClient,
}, nil
}
func (c *ClientFactory) Create(service project.Service) dockerclient.Client {
if IsSystemContainer(service.Config()) {
waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST)
return c.systemClient
}
waitFor(&c.userOnce, c.userClient, config.DOCKER_HOST)
return c.userClient
}
func waitFor(once *sync.Once, client dockerclient.Client, endpoint string) {
once.Do(func() {
err := ClientOK(endpoint, func() bool {
_, err := client.Info()
return err == nil
})
if err != nil {
panic(err.Error())
}
})
}
func ClientOK(endpoint string, test func() bool) error {
backoff := util.Backoff{}
defer backoff.Close()
var err error
retry := false
for ok := range backoff.Start() {
if !ok {
err = fmt.Errorf("Timeout waiting for Docker at %s", endpoint)
break
}
if test() {
break
}
retry = true
log.Infof("Waiting for Docker at %s", endpoint)
}
if err != nil {
return err
}
if retry {
log.Infof("Connected to Docker at %s", endpoint)
}
return nil
}
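As context (not part of this commit): ClientOK is the readiness gate the client factory uses before handing a dockerclient to libcompose. Below is a minimal standalone sketch of the same poll-until-ready pattern, using a plain deadline loop instead of rancherio/os/util.Backoff; waitForDaemon, maxWait and interval are invented names, and the timeout value is an assumption.

// Sketch only; not part of this commit. Same idea as ClientOK, with a plain
// deadline loop standing in for util.Backoff.
package main

import (
	"fmt"
	"log"
	"time"
)

func waitForDaemon(endpoint string, test func() bool) error {
	const (
		maxWait  = 60 * time.Second // assumed timeout, not the commit's value
		interval = time.Second
	)
	deadline := time.Now().Add(maxWait)
	for !test() {
		if time.Now().After(deadline) {
			return fmt.Errorf("Timeout waiting for Docker at %s", endpoint)
		}
		log.Printf("Waiting for Docker at %s", endpoint)
		time.Sleep(interval)
	}
	return nil
}

func main() {
	start := time.Now()
	// Pretend the daemon becomes reachable after two seconds.
	err := waitForDaemon("unix:///var/run/docker.sock", func() bool {
		return time.Since(start) > 2*time.Second
	})
	fmt.Println("ready:", err)
}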


@@ -1,632 +0,0 @@
package docker
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
log "github.com/Sirupsen/logrus"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/runconfig"
shlex "github.com/flynn/go-shlex"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/docker"
"github.com/rancherio/rancher-compose/librcompose/project"
)
type Container struct {
Err error
Name string
remove bool
detach bool
Config *runconfig.Config
HostConfig *runconfig.HostConfig
dockerHost string
Container *dockerClient.Container
ContainerCfg *config.ContainerConfig
}
type ByCreated []dockerClient.APIContainers
func (c ByCreated) Len() int { return len(c) }
func (c ByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c ByCreated) Less(i, j int) bool { return c[j].Created < c[i].Created }
func getHash(containerCfg *config.ContainerConfig) string {
hash := sha1.New()
io.WriteString(hash, fmt.Sprintln(containerCfg.Id))
io.WriteString(hash, fmt.Sprintln(containerCfg.Cmd))
io.WriteString(hash, fmt.Sprintln(containerCfg.MigrateVolumes))
io.WriteString(hash, fmt.Sprintln(containerCfg.ReloadConfig))
io.WriteString(hash, fmt.Sprintln(containerCfg.CreateOnly))
if containerCfg.Service != nil {
//Get values of Service through reflection
val := reflect.ValueOf(containerCfg.Service).Elem()
//Create slice to sort the keys in Service Config, which allow constant hash ordering
serviceKeys := []string{}
//Create a data structure of map of values keyed by a string
unsortedKeyValue := make(map[string]interface{})
//Get all keys and values in Service Configuration
for i := 0; i < val.NumField(); i++ {
valueField := val.Field(i)
keyField := val.Type().Field(i)
serviceKeys = append(serviceKeys, keyField.Name)
unsortedKeyValue[keyField.Name] = valueField.Interface()
}
//Sort serviceKeys alphabetically
sort.Strings(serviceKeys)
//Go through keys and write hash
for _, serviceKey := range serviceKeys {
serviceValue := unsortedKeyValue[serviceKey]
io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey))
switch s := serviceValue.(type) {
case project.SliceorMap:
sliceKeys := []string{}
for lkey := range s.MapParts() {
if lkey != "io.rancher.os.hash" {
sliceKeys = append(sliceKeys, lkey)
}
}
sort.Strings(sliceKeys)
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s.MapParts()[sliceKey]))
}
case project.MaporEqualSlice:
sliceKeys := s.Slice()
// do not sort keys as the order matters
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case project.MaporColonSlice:
sliceKeys := s.Slice()
// do not sort keys as the order matters
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case project.MaporSpaceSlice:
sliceKeys := s.Slice()
// do not sort keys as the order matters
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case project.Command:
sliceKeys := s.Slice()
// do not sort keys as the order matters
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case project.Stringorslice:
sliceKeys := s.Slice()
sort.Strings(sliceKeys)
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case []string:
sliceKeys := s
sort.Strings(sliceKeys)
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
default:
io.WriteString(hash, fmt.Sprintf("%v", serviceValue))
}
}
}
return hex.EncodeToString(hash.Sum(nil))
}
func StartAndWait(dockerHost string, containerCfg *config.ContainerConfig) error {
container := NewContainer(dockerHost, containerCfg).start(false, true)
return container.Err
}
func NewContainerFromService(dockerHost string, name string, service *project.ServiceConfig) *Container {
c := &Container{
Name: name,
dockerHost: dockerHost,
ContainerCfg: &config.ContainerConfig{
Id: name,
Service: service,
},
}
return c.Parse()
}
func NewContainer(dockerHost string, containerCfg *config.ContainerConfig) *Container {
c := &Container{
dockerHost: dockerHost,
ContainerCfg: containerCfg,
}
return c.Parse()
}
func (c *Container) returnErr(err error) *Container {
c.Err = err
return c
}
func getByLabel(client *dockerClient.Client, key, value string) (*dockerClient.APIContainers, error) {
containers, err := client.ListContainers(dockerClient.ListContainersOptions{
All: true,
Filters: map[string][]string{
config.LABEL: {fmt.Sprintf("%s=%s", key, value)},
},
})
if err != nil {
return nil, err
}
if len(containers) == 0 {
return nil, nil
}
sort.Sort(ByCreated(containers))
return &containers[0], nil
}
func (c *Container) Lookup() *Container {
c.Parse()
if c.Err != nil || (c.Container != nil && c.Container.HostConfig != nil) {
return c
}
hash := getHash(c.ContainerCfg)
client, err := NewClient(c.dockerHost)
if err != nil {
return c.returnErr(err)
}
containers, err := client.ListContainers(dockerClient.ListContainersOptions{
All: true,
Filters: map[string][]string{
config.LABEL: {fmt.Sprintf("%s=%s", config.HASH, hash)},
},
})
if err != nil {
return c.returnErr(err)
}
if len(containers) == 0 {
return c
}
c.Container, c.Err = inspect(client, containers[0].ID)
return c
}
func inspect(client *dockerClient.Client, id string) (*dockerClient.Container, error) {
c, err := client.InspectContainer(id)
if err != nil {
return nil, err
}
if strings.HasPrefix(c.Name, "/") {
c.Name = c.Name[1:]
}
return c, err
}
func (c *Container) Exists() bool {
c.Lookup()
return c.Container != nil
}
func (c *Container) Reset() *Container {
c.Config = nil
c.HostConfig = nil
c.Container = nil
c.Err = nil
return c
}
func (c *Container) requiresSyslog() bool {
return (c.ContainerCfg.Service.LogDriver == "" || c.ContainerCfg.Service.LogDriver == "syslog")
}
func (c *Container) requiresUserDocker() bool {
if c.dockerHost == config.DOCKER_HOST {
return true
}
return false
}
func (c *Container) hasLink(link string) bool {
return util.Contains(c.ContainerCfg.Service.Links.Slice(), link)
}
func (c *Container) addLink(link string) {
if c.hasLink(link) {
return
}
log.Debugf("Adding %s link to %s", link, c.Name)
c.ContainerCfg.Service.Links = project.NewMaporColonSlice(append(c.ContainerCfg.Service.Links.Slice(), link))
}
func (c *Container) parseService() {
if c.requiresSyslog() {
c.addLink("syslog")
}
if c.requiresUserDocker() {
c.addLink("dockerwait")
} else if c.ContainerCfg.Service.Image != "" {
client, err := NewClient(c.dockerHost)
if err != nil {
c.Err = err
return
}
i, _ := client.InspectImage(c.ContainerCfg.Service.Image)
if i == nil {
c.addLink("network")
}
}
cfg, hostConfig, err := docker.Convert(c.ContainerCfg.Service)
if err != nil {
c.Err = err
return
}
c.Config = cfg
c.HostConfig = hostConfig
c.detach = c.Config.Labels[config.DETACH] != "false"
c.remove = c.Config.Labels[config.REMOVE] != "false"
c.ContainerCfg.CreateOnly = c.Config.Labels[config.CREATE_ONLY] == "true"
c.ContainerCfg.ReloadConfig = c.Config.Labels[config.RELOAD_CONFIG] == "true"
}
func (c *Container) parseCmd() {
flags := flag.NewFlagSet("run", flag.ExitOnError)
flRemove := flags.Bool([]string{"#rm", "-rm"}, false, "")
flDetach := flags.Bool([]string{"d", "-detach"}, false, "")
flName := flags.String([]string{"#name", "-name"}, "", "")
args, err := shlex.Split(c.ContainerCfg.Cmd)
if err != nil {
c.Err = err
return
}
log.Debugf("Parsing [%s]", strings.Join(args, ","))
c.Config, c.HostConfig, _, c.Err = runconfig.Parse(flags, args)
c.Name = *flName
c.detach = *flDetach
c.remove = *flRemove
}
func (c *Container) Parse() *Container {
if c.Config != nil || c.Err != nil {
return c
}
if len(c.ContainerCfg.Cmd) > 0 {
c.parseCmd()
} else if c.ContainerCfg.Service != nil {
c.parseService()
} else {
c.Err = errors.New("Cmd or Service must be set")
return c
}
if c.ContainerCfg.Id == "" {
c.ContainerCfg.Id = c.Name
}
return c
}
func (c *Container) Create() *Container {
return c.start(true, false)
}
func (c *Container) Start() *Container {
return c.start(false, false)
}
func (c *Container) StartAndWait() *Container {
return c.start(false, true)
}
func (c *Container) Stage() *Container {
c.Parse()
if c.Err != nil {
return c
}
client, err := NewClient(c.dockerHost)
if err != nil {
c.Err = err
return c
}
_, err = client.InspectImage(c.Config.Image)
if err == dockerClient.ErrNoSuchImage {
toPull := c.Config.Image
_, tag := parsers.ParseRepositoryTag(toPull)
if tag == "" {
toPull += ":latest"
}
c.Err = client.PullImage(dockerClient.PullImageOptions{
Repository: toPull,
OutputStream: os.Stdout,
}, dockerClient.AuthConfiguration{})
} else if err != nil {
log.Errorf("Failed to stage: %s: %v", c.Config.Image, err)
c.Err = err
}
return c
}
func (c *Container) Delete() *Container {
c.Parse()
c.Stage()
c.Lookup()
if c.Err != nil {
return c
}
if !c.Exists() {
return c
}
client, err := NewClient(c.dockerHost)
if err != nil {
return c.returnErr(err)
}
err = client.RemoveContainer(dockerClient.RemoveContainerOptions{
ID: c.Container.ID,
Force: true,
})
if err != nil {
return c.returnErr(err)
}
return c
}
func (c *Container) renameCurrent(client *dockerClient.Client) error {
if c.Name == "" {
return nil
}
if c.Name == c.Container.Name {
return nil
}
err := client.RenameContainer(dockerClient.RenameContainerOptions{ID: c.Container.ID, Name: c.Name})
if err != nil {
return err
}
c.Container, err = inspect(client, c.Container.ID)
return err
}
func (c *Container) renameOld(client *dockerClient.Client, opts *dockerClient.CreateContainerOptions) error {
if len(opts.Name) == 0 {
return nil
}
existing, err := inspect(client, opts.Name)
if _, ok := err.(*dockerClient.NoSuchContainer); ok {
return nil
}
if err != nil {
return nil
}
if c.Container != nil && existing.ID == c.Container.ID {
return nil
}
var newName string
if label, ok := existing.Config.Labels[config.HASH]; ok {
newName = fmt.Sprintf("%s-%s", existing.Name, label)
} else {
newName = fmt.Sprintf("%s-unknown-%s", existing.Name, util.RandSeq(12))
}
if existing.State.Running {
err := client.StopContainer(existing.ID, 2)
if err != nil {
return err
}
_, err = client.WaitContainer(existing.ID)
if err != nil {
return err
}
}
log.Debugf("Renaming %s to %s", existing.Name, newName)
return client.RenameContainer(dockerClient.RenameContainerOptions{ID: existing.ID, Name: newName})
}
func (c *Container) getCreateOpts(client *dockerClient.Client) (*dockerClient.CreateContainerOptions, error) {
bytes, err := json.Marshal(c)
if err != nil {
log.Errorf("Failed to marshall: %v", c)
return nil, err
}
var opts dockerClient.CreateContainerOptions
err = json.Unmarshal(bytes, &opts)
if err != nil {
log.Errorf("Failed to unmarshall: %s", string(bytes))
return nil, err
}
if opts.Config.Labels == nil {
opts.Config.Labels = make(map[string]string)
}
hash := getHash(c.ContainerCfg)
opts.Config.Labels[config.HASH] = hash
opts.Config.Labels[config.ID] = c.ContainerCfg.Id
return &opts, nil
}
func appendVolumesFrom(client *dockerClient.Client, containerCfg *config.ContainerConfig, opts *dockerClient.CreateContainerOptions) error {
if !containerCfg.MigrateVolumes {
return nil
}
container, err := getByLabel(client, config.ID, containerCfg.Id)
if err != nil || container == nil {
return err
}
if opts.HostConfig.VolumesFrom == nil {
opts.HostConfig.VolumesFrom = []string{container.ID}
} else {
opts.HostConfig.VolumesFrom = append(opts.HostConfig.VolumesFrom, container.ID)
}
return nil
}
func (c *Container) start(createOnly, wait bool) *Container {
log.Debugf("Container: STARTING '%v', createOnly: %v, !detach: %v, wait: %v", c.Name, createOnly, !c.detach, wait)
c.Lookup()
c.Stage()
if c.Err != nil {
return c
}
client, err := NewClient(c.dockerHost)
if err != nil {
return c.returnErr(err)
}
created := false
opts, err := c.getCreateOpts(client)
if err != nil {
log.Errorf("Failed to create container create options: %v", err)
return c.returnErr(err)
}
if c.Exists() && c.remove {
log.Debugf("Deleting container %s", c.Container.ID)
c.Delete()
if c.Err != nil {
return c
}
c.Reset().Lookup()
if c.Err != nil {
return c
}
}
if !c.Exists() {
err = c.renameOld(client, opts)
if err != nil {
return c.returnErr(err)
}
err := appendVolumesFrom(client, c.ContainerCfg, opts)
if err != nil {
return c.returnErr(err)
}
c.Container, err = client.CreateContainer(*opts)
created = true
if err != nil {
return c.returnErr(err)
}
}
hostConfig := c.Container.HostConfig
if created {
hostConfig = opts.HostConfig
}
if createOnly {
return c
}
if !c.Container.State.Running {
if !created {
err = c.renameOld(client, opts)
if err != nil {
return c.returnErr(err)
}
}
err = c.renameCurrent(client)
if err != nil {
return c.returnErr(err)
}
err = client.StartContainer(c.Container.ID, hostConfig)
if err != nil {
log.Errorf("Error from Docker %s", err)
return c.returnErr(err)
}
}
log.Debugf("Container: WAIT? '%v' !c.detach && wait: %v", c.Name, !c.detach && wait)
if !c.detach && wait {
var exitCode int
exitCode, c.Err = client.WaitContainer(c.Container.ID)
log.Debugf("Container: FINISHED '%v', exitCode: %v", c.Name, exitCode)
if exitCode != 0 {
c.Err = errors.New(fmt.Sprintf("Container %s exited with code %d", c.Name, exitCode))
}
return c
}
return c
}


@@ -1,307 +0,0 @@
package docker
import (
"fmt"
"strings"
"testing"
"github.com/rancherio/os/config"
"github.com/rancherio/rancher-compose/librcompose/project"
"github.com/stretchr/testify/require"
dockerClient "github.com/fsouza/go-dockerclient"
"os"
)
func testDockerHost(t *testing.T) {
assert := require.New(t)
assert.Equal(os.Getenv("DOCKER_HOST"), config.DOCKER_HOST)
}
func TestHash(t *testing.T) {
assert := require.New(t)
hash := getHash(&config.ContainerConfig{
Id: "id",
Cmd: "1 2 3",
})
hash2 := getHash(&config.ContainerConfig{
Id: "id2",
Cmd: "1 2 3",
})
hash3 := getHash(&config.ContainerConfig{
Id: "id3",
Cmd: "1 2 3 4",
})
assert.Equal("d601444333c7fb4cb955bcca36c5ed59b6fa8c3f", hash, "")
assert.NotEqual(hash, hash2, "")
assert.NotEqual(hash2, hash3, "")
assert.NotEqual(hash, hash3, "")
}
func TestHash2(t *testing.T) {
assert := require.New(t)
cfg := &config.ContainerConfig{
Id: "docker-volumes",
Cmd: "",
MigrateVolumes: false,
ReloadConfig: false,
CreateOnly: true,
Service: &project.ServiceConfig{
CapAdd: nil,
CapDrop: nil,
CpuShares: 0,
Command: project.NewCommand(),
Detach: "",
Dns: project.NewStringorslice(),
DnsSearch: project.NewStringorslice(),
DomainName: "",
Entrypoint: project.NewCommand(),
EnvFile: project.NewStringorslice(),
Environment: project.NewMaporEqualSlice([]string{}),
Hostname: "",
Image: "state",
Labels: project.NewSliceorMap(map[string]string{
"io.rancher.os.createonly": "true",
"io.rancher.os.scope": "system"}),
Links: project.NewMaporColonSlice(nil),
LogDriver: "json-file",
MemLimit: 0,
Name: "",
Net: "none",
Pid: "",
Ipc: "",
Ports: nil,
Privileged: true,
Restart: "",
ReadOnly: true,
StdinOpen: false,
Tty: false,
User: "",
Volumes: []string{
"/var/lib/docker:/var/lib/docker",
"/var/lib/rancher/conf:/var/lib/rancher/conf",
"/var/lib/system-docker:/var/lib/system-docker"},
VolumesFrom: nil,
WorkingDir: "",
Expose: nil,
ExternalLinks: nil},
}
for i := 0; i < 1000; i++ {
assert.Equal(getHash(cfg), getHash(cfg), fmt.Sprintf("Failed at iteration: %v", i))
}
}
func TestBool2String(t *testing.T) {
assert := require.New(t)
assert.Equal("true", fmt.Sprint(true), "")
}
func TestParse(t *testing.T) {
assert := require.New(t)
cfg := &config.ContainerConfig{
Cmd: "--name c1 " +
"-d " +
"--rm " +
"--privileged " +
"test/image " +
"arg1 " +
"arg2 ",
}
c := NewContainer("", cfg).Parse()
assert.NoError(c.Err, "")
assert.Equal(cfg.Id, "c1", "Id doesn't match")
assert.Equal(c.Name, "c1", "Name doesn't match")
assert.True(c.remove, "Remove doesn't match")
assert.True(c.detach, "Detach doesn't match")
assert.Equal(c.Config.Cmd.Len(), 2, "Args doesn't match")
assert.Equal(c.Config.Cmd.Slice()[0], "arg1", "Arg1 doesn't match")
assert.Equal(c.Config.Cmd.Slice()[1], "arg2", "Arg2 doesn't match")
assert.True(c.HostConfig.Privileged, "Privileged doesn't match")
}
func TestIdFromName(t *testing.T) {
assert := require.New(t)
cfg := &config.ContainerConfig{
Cmd: "--name foo -v /test busybox echo hi",
}
assert.Equal("", cfg.Id)
NewContainer(config.DOCKER_HOST, cfg)
assert.Equal("foo", cfg.Id)
}
func testMigrateVolumes(t *testing.T) {
assert := require.New(t)
c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--name foo -v /test busybox echo hi",
}).Parse().Start().Lookup()
assert.NoError(c.Err, "")
test_path, ok := c.Container.Volumes["/test"]
assert.True(ok, "")
c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
MigrateVolumes: true,
Cmd: "--name foo -v /test2 busybox echo hi",
}).Parse().Start().Lookup()
assert.NoError(c2.Err, "")
assert.True(c2.Container != nil)
_, ok = c2.Container.Volumes["/test2"]
assert.True(ok, "")
assert.Equal(test_path, c2.Container.Volumes["/test"])
c.Delete()
c2.Delete()
}
func testRollback(t *testing.T) {
assert := require.New(t)
c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--name rollback busybox echo hi",
}).Parse().Start().Lookup()
assert.NoError(c.Err, "")
assert.Equal("rollback", c.Container.Name)
c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--name rollback busybox echo bye",
}).Parse().Start().Lookup()
assert.Equal("rollback", c2.Container.Name)
assert.NoError(c2.Err, "")
assert.NotEqual(c.Container.ID, c2.Container.ID)
c3 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--name rollback busybox echo hi",
}).Parse().Start().Lookup()
assert.NoError(c3.Err, "")
assert.Equal(c.Container.ID, c3.Container.ID)
assert.Equal("rollback", c3.Container.Name)
c2.Reset().Lookup()
assert.NoError(c2.Err, "")
assert.True(strings.HasPrefix(c2.Container.Name, "rollback-"))
c.Delete()
c2.Delete()
}
func testStart(t *testing.T) {
assert := require.New(t)
c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--pid=host --privileged --rm busybox echo hi",
}).Parse().Start().Lookup()
assert.NoError(c.Err, "")
assert.True(c.HostConfig.Privileged, "")
assert.True(c.Container.HostConfig.Privileged, "")
assert.Equal("host", c.Container.HostConfig.PidMode, "")
c.Delete()
}
func testLookup(t *testing.T) {
assert := require.New(t)
cfg := &config.ContainerConfig{
Cmd: "--rm busybox echo hi",
}
c := NewContainer(config.DOCKER_HOST, cfg).Parse().Start()
cfg2 := &config.ContainerConfig{
Cmd: "--rm busybox echo hi2",
}
c2 := NewContainer(config.DOCKER_HOST, cfg2).Parse().Start()
assert.NoError(c.Err, "")
assert.NoError(c2.Err, "")
c1Lookup := NewContainer(config.DOCKER_HOST, cfg).Lookup()
c2Lookup := NewContainer(config.DOCKER_HOST, cfg2).Lookup()
assert.NoError(c1Lookup.Err, "")
assert.NoError(c2Lookup.Err, "")
assert.Equal(c.Container.ID, c1Lookup.Container.ID, "")
assert.Equal(c2.Container.ID, c2Lookup.Container.ID, "")
c.Delete()
c2.Delete()
}
func testDelete(t *testing.T) {
assert := require.New(t)
c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
Cmd: "--rm busybox echo hi",
}).Parse()
assert.False(c.Exists())
assert.NoError(c.Err, "")
c.Start()
assert.NoError(c.Err, "")
c.Reset()
assert.NoError(c.Err, "")
assert.True(c.Exists())
assert.NoError(c.Err, "")
c.Delete()
assert.NoError(c.Err, "")
c.Reset()
assert.False(c.Exists())
assert.NoError(c.Err, "")
}
func testDockerClientNames(t *testing.T) {
assert := require.New(t)
client, err := dockerClient.NewClient(config.DOCKER_HOST)
assert.NoError(err, "")
c, err := client.CreateContainer(dockerClient.CreateContainerOptions{
Name: "foo",
Config: &dockerClient.Config{
Image: "ubuntu",
},
})
assert.NoError(err, "")
assert.Equal("foo", c.Name)
c2, err := client.InspectContainer(c.ID)
assert.NoError(err, "")
assert.Equal("/foo", c2.Name)
c2, err = inspect(client, c.ID)
assert.NoError(err, "")
assert.Equal("foo", c2.Name)
client.RemoveContainer(dockerClient.RemoveContainerOptions{
ID: c2.ID,
Force: true,
})
}

docker/env.go (new file, 55 lines)

@@ -0,0 +1,55 @@
package docker
import (
"fmt"
"strings"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/config"
)
type ConfigEnvironment struct {
cfg *config.CloudConfig
}
func NewConfigEnvironment(cfg *config.CloudConfig) *ConfigEnvironment {
return &ConfigEnvironment{
cfg: cfg,
}
}
func appendEnv(array []string, key, value string) []string {
parts := strings.SplitN(key, "/", 2)
if len(parts) == 2 {
key = parts[1]
}
return append(array, fmt.Sprintf("%s=%s", key, value))
}
func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
for _, key := range keys {
if strings.HasSuffix(key, "*") {
result := []string{}
for envKey, envValue := range cfg.Rancher.Environment {
keyPrefix := key[:len(key)-1]
if strings.HasPrefix(envKey, keyPrefix) {
result = appendEnv(result, envKey, envValue)
}
}
if len(result) > 0 {
return result
}
} else if value, ok := cfg.Rancher.Environment[key]; ok {
return appendEnv([]string{}, key, value)
}
}
return []string{}
}
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
fullKey := fmt.Sprintf("%s/%s", serviceName, key)
return lookupKeys(c.cfg, fullKey, key)
}
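As context (not part of this commit): Lookup resolves per-service environment from rancher.environment, preferring a service-scoped "service/KEY" entry over a bare "KEY", honoring a trailing "*" as a prefix match, and stripping the scope before the variable reaches the container. Below is a runnable sketch of those rules against a plain map; the entries and service names are invented.

// Sketch only; not part of this commit. Mirrors lookupKeys/appendEnv with a
// plain map so the precedence rules can be run standalone.
package main

import (
	"fmt"
	"strings"
)

func lookup(env map[string]string, serviceName, key string) []string {
	for _, k := range []string{serviceName + "/" + key, key} { // scoped key first
		if strings.HasSuffix(k, "*") {
			prefix := strings.TrimSuffix(k, "*")
			var result []string
			for envKey, envValue := range env {
				if strings.HasPrefix(envKey, prefix) {
					result = append(result, format(envKey, envValue))
				}
			}
			if len(result) > 0 {
				return result
			}
		} else if value, ok := env[k]; ok {
			return []string{format(k, value)}
		}
	}
	return nil
}

// format drops a leading "service/" scope, exactly like appendEnv above.
func format(key, value string) string {
	if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
		key = parts[1]
	}
	return fmt.Sprintf("%s=%s", key, value)
}

func main() {
	env := map[string]string{
		"HTTP_PROXY":             "http://proxy.example.com:3128",
		"cloud-init/DATASOURCES": "ec2",
	}
	fmt.Println(lookup(env, "cloud-init", "DATASOURCES")) // [DATASOURCES=ec2]
	fmt.Println(lookup(env, "network", "HTTP_PROXY"))     // [HTTP_PROXY=http://proxy.example.com:3128]
	fmt.Println(lookup(env, "network", "DATASOURCES"))    // []
}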


@@ -1,103 +0,0 @@
package docker
import (
log "github.com/Sirupsen/logrus"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
)
type ContainerFactory struct {
cfg *config.CloudConfig
}
type containerBasedService struct {
project.EmptyService
name string
project *project.Project
container *Container
serviceConfig *project.ServiceConfig
cfg *config.CloudConfig
}
func NewContainerFactory(cfg *config.CloudConfig) *ContainerFactory {
return &ContainerFactory{
cfg: cfg,
}
}
func (c *containerBasedService) Up() error {
container := c.container
containerCfg := c.container.ContainerCfg
fakeCreate := false
create := containerCfg.CreateOnly
if util.Contains(c.cfg.Rancher.Disable, c.name) {
fakeCreate = true
}
var event project.Event
c.project.Notify(project.CONTAINER_STARTING, c.name, map[string]string{})
if fakeCreate {
event = project.CONTAINER_CREATED
} else if create {
container.Create()
event = project.CONTAINER_CREATED
} else {
container.StartAndWait()
event = project.CONTAINER_STARTED
}
if container.Err != nil {
log.Errorf("Failed to run %v: %v", containerCfg.Id, container.Err)
}
if container.Err == nil && containerCfg.ReloadConfig {
return project.ErrRestart
}
if container.Container != nil {
c.project.Notify(event, c.name, map[string]string{
project.CONTAINER_ID: container.Container.ID,
})
}
return container.Err
}
func (c *containerBasedService) Config() *project.ServiceConfig {
return c.serviceConfig
}
func (c *containerBasedService) Name() string {
return c.name
}
func isSystemService(serviceConfig *project.ServiceConfig) bool {
return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM
}
func (c *ContainerFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
host := config.DOCKER_HOST
if isSystemService(serviceConfig) {
host = config.DOCKER_SYSTEM_HOST
}
container := NewContainerFromService(host, name, serviceConfig)
if container.Err != nil {
return nil, container.Err
}
return &containerBasedService{
name: name,
project: project,
container: container,
serviceConfig: serviceConfig,
cfg: c.cfg,
}, nil
}

docker/service.go (new file, 159 lines)

@@ -0,0 +1,159 @@
package docker
import (
"fmt"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/docker/machine/log"
"github.com/rancherio/os/config"
"github.com/samalba/dockerclient"
)
type Service struct {
*docker.Service
deps map[string][]string
context *docker.Context
}
func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context) *Service {
return &Service{
Service: docker.NewService(name, serviceConfig, context),
deps: factory.Deps,
context: context,
}
}
func (s *Service) DependentServices() []project.ServiceRelationship {
rels := s.Service.DependentServices()
for _, dep := range s.deps[s.Name()] {
rels = appendLink(rels, dep, true)
}
if s.requiresSyslog() {
rels = appendLink(rels, "syslog", false)
}
if s.requiresUserDocker() {
// Linking to cloud-init is a hack really. The problem is we need to link to something
// that will trigger a reload
rels = appendLink(rels, "cloud-init", false)
} else if s.missingImage() {
rels = appendLink(rels, "network", false)
}
return rels
}
func (s *Service) missingImage() bool {
image := s.Config().Image
if image == "" {
return false
}
client := s.context.ClientFactory.Create(s)
i, err := client.InspectImage(s.Config().Image)
return err != nil || i == nil
}
func (s *Service) requiresSyslog() bool {
return s.Config().LogDriver == "syslog"
}
func (s *Service) requiresUserDocker() bool {
return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM
}
func appendLink(deps []project.ServiceRelationship, name string, optional bool) []project.ServiceRelationship {
rel := project.NewServiceRelationship(name, project.REL_TYPE_LINK)
rel.Optional = optional
return append(deps, rel)
}
func (s *Service) Up() error {
labels := s.Config().Labels.MapParts()
if err := s.Service.Create(); err != nil {
return err
}
if err := s.rename(); err != nil {
return err
}
if labels[config.CREATE_ONLY] == "true" {
return s.checkReload(labels)
}
if err := s.Service.Up(); err != nil {
return err
}
if labels[config.DETACH] == "false" {
if err := s.wait(); err != nil {
return err
}
}
return s.checkReload(labels)
}
func (s *Service) checkReload(labels map[string]string) error {
if labels[config.RELOAD_CONFIG] == "true" {
return project.ErrRestart
}
return nil
}
func (s *Service) Create() error {
if err := s.Service.Create(); err != nil {
return err
}
return s.rename()
}
func (s *Service) getContainer() (dockerclient.Client, *dockerclient.ContainerInfo, error) {
containers, err := s.Service.Containers()
if err != nil {
return nil, nil, err
}
if len(containers) == 0 {
return nil, nil, nil
}
id, err := containers[0].Id()
if err != nil {
return nil, nil, err
}
client := s.context.ClientFactory.Create(s)
info, err := client.InspectContainer(id)
return client, info, err
}
func (s *Service) wait() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
return err
}
status := <-client.Wait(info.Id)
if status.Error != nil {
return status.Error
}
if status.ExitCode == 0 {
return nil
} else {
return fmt.Errorf("ExitCode %d", status.ExitCode)
}
}
func (s *Service) rename() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
return err
}
if len(info.Name) > 0 && info.Name[1:] != s.Name() {
log.Debugf("Renaming container %s => %s", info.Name[1:], s.Name())
return client.RenameContainer(info.Name[1:], s.Name())
} else {
return nil
}
}
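As context (not part of this commit): Up(), checkReload() and wait() above are driven entirely by labels. "io.rancher.os.createonly" and "io.rancher.os.scope" appear verbatim elsewhere in this diff; the detach and reload-config label strings used below are assumptions standing in for config.DETACH and config.RELOAD_CONFIG, and the sketch only models the branching.

// Sketch only; not part of this commit. Models the branching in
// Service.Up()/checkReload()/wait(); the detach/reloadconfig label names are
// assumptions, the real code reads them via constants in the config package.
package main

import "fmt"

func lifecycle(labels map[string]string) []string {
	steps := []string{"create container", "rename to service name"}
	if labels["io.rancher.os.createonly"] == "true" {
		// created but never started (e.g. volume-only containers)
	} else {
		steps = append(steps, "start")
		if labels["io.rancher.os.detach"] == "false" {
			steps = append(steps, "wait for exit, fail on non-zero code")
		}
	}
	if labels["io.rancher.os.reloadconfig"] == "true" {
		steps = append(steps, "return project.ErrRestart so the project reloads")
	}
	return steps
}

func main() {
	fmt.Println(lifecycle(map[string]string{"io.rancher.os.createonly": "true"}))
	fmt.Println(lifecycle(map[string]string{"io.rancher.os.detach": "false"}))
	fmt.Println(lifecycle(map[string]string{"io.rancher.os.reloadconfig": "true"}))
}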

docker/service_factory.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package docker
import (
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/util"
)
type ServiceFactory struct {
Context *docker.Context
Deps map[string][]string
}
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
if after := serviceConfig.Labels.MapParts()["io.rancher.os.after"]; after != "" {
for _, dep := range util.TrimSplit(after, ",") {
s.Deps[name] = append(s.Deps[name], dep)
}
}
if before := serviceConfig.Labels.MapParts()["io.rancher.os.before"]; before != "" {
for _, dep := range util.TrimSplit(before, ",") {
s.Deps[dep] = append(s.Deps[dep], name)
}
}
return NewService(s, name, serviceConfig, s.Context), nil
}
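As context (not part of this commit): the two ordering labels feed the Deps map that Service.DependentServices() later turns into extra links. "io.rancher.os.after" makes the labeled service depend on the listed ones, while "io.rancher.os.before" is inverted so the listed services depend on it. Below is a runnable sketch with invented service names; strings.Split plus TrimSpace stands in for util.TrimSplit.

// Sketch only; not part of this commit. Shows how the after/before labels
// handled by ServiceFactory.Create build the dependency map.
package main

import (
	"fmt"
	"strings"
)

func buildDeps(labels map[string]map[string]string) map[string][]string {
	deps := map[string][]string{}
	for name, l := range labels {
		if after := l["io.rancher.os.after"]; after != "" {
			for _, dep := range strings.Split(after, ",") {
				deps[name] = append(deps[name], strings.TrimSpace(dep))
			}
		}
		if before := l["io.rancher.os.before"]; before != "" {
			for _, dep := range strings.Split(before, ",") {
				d := strings.TrimSpace(dep)
				deps[d] = append(deps[d], name) // invert: "A before B" means "B after A"
			}
		}
	}
	return deps
}

func main() {
	deps := buildDeps(map[string]map[string]string{
		"ntp":     {"io.rancher.os.after": "network"},
		"preload": {"io.rancher.os.before": "docker"},
	})
	fmt.Println(deps) // map[docker:[preload] ntp:[network]]
}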


@@ -1,137 +0,0 @@
package docker
import (
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
)
type configEnvironment struct {
cfg *config.CloudConfig
}
func appendEnv(array []string, key, value string) []string {
parts := strings.SplitN(key, "/", 2)
if len(parts) == 2 {
key = parts[1]
}
return append(array, fmt.Sprintf("%s=%s", key, value))
}
func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
for _, key := range keys {
if strings.HasSuffix(key, "*") {
result := []string{}
for envKey, envValue := range cfg.Rancher.Environment {
keyPrefix := key[:len(key)-1]
if strings.HasPrefix(envKey, keyPrefix) {
result = appendEnv(result, envKey, envValue)
}
}
if len(result) > 0 {
return result
}
} else if value, ok := cfg.Rancher.Environment[key]; ok {
return appendEnv([]string{}, key, value)
}
}
return []string{}
}
func (c *configEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
fullKey := fmt.Sprintf("%s/%s", serviceName, key)
return lookupKeys(c.cfg, fullKey, key)
}
func RunServices(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) error {
network := false
projectEvents := make(chan project.ProjectEvent)
p := project.NewProject(name, NewContainerFactory(cfg))
p.EnvironmentLookup = &configEnvironment{cfg: cfg}
p.AddListener(projectEvents)
enabled := make(map[string]bool)
for name, serviceConfig := range configs {
if err := p.AddConfig(name, serviceConfig); err != nil {
log.Infof("Failed loading service %s", name)
continue
}
enabled[name] = true
}
p.ReloadCallback = func() error {
if p.Name != "system-init" {
return nil
}
if err := cfg.Reload(); err != nil {
return err
}
for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
if !serviceEnabled {
continue
}
if en, ok := enabled[service]; ok && en {
continue
}
bytes, err := LoadServiceResource(service, network, cfg)
if err != nil {
if err == util.ErrNoNetwork {
log.Debugf("Can not load %s, networking not enabled", service)
} else {
log.Errorf("Failed to load %s : %v", service, err)
}
continue
}
if err := p.Load(bytes); err != nil {
log.Errorf("Failed to load %s : %v", service, err)
continue
}
enabled[service] = true
}
for service, config := range cfg.Rancher.Services {
if en, ok := enabled[service]; ok && en {
continue
}
if err := p.AddConfig(service, config); err != nil {
log.Errorf("Failed to load %s : %v", service, err)
continue
}
enabled[service] = true
}
return nil
}
go func() {
for event := range projectEvents {
if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" {
network = true
}
}
}()
if err := p.ReloadCallback(); err != nil {
log.Errorf("Failed to reload %s : %v", name, err)
return err
}
return p.Up()
}
func LoadServiceResource(name string, network bool, cfg *config.CloudConfig) ([]byte, error) {
return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray())
}

docker/util.go (new file, 11 lines)

@@ -0,0 +1,11 @@
package docker
import (
"github.com/docker/libcompose/project"
"github.com/rancherio/os/config"
)
func IsSystemContainer(serviceConfig *project.ServiceConfig) bool {
return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM
}