1
0
mirror of https://github.com/rancher/os.git synced 2025-06-23 13:37:03 +00:00

Refactor to use libcompose

This commit is contained in:
Darren Shepherd 2015-08-04 14:45:38 -07:00
parent 19f9a1b281
commit 9d76b79ac3
26 changed files with 665 additions and 1458 deletions

View File

@ -47,12 +47,6 @@ func Main() {
HideHelp: true,
Subcommands: serviceSubCommands(),
},
//{
// Name: "reload",
// ShortName: "a",
// Usage: "reload configuration of a service and restart the container",
// Action: reload,
//},
{
Name: "os",
Usage: "operating system upgrade/downgrade",

View File

@ -15,7 +15,9 @@ import (
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/codegangsta/cli"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/cmd/power"
"github.com/rancherio/os/compose"
"github.com/rancherio/os/config"
"github.com/rancherio/os/docker"
)
@ -147,7 +149,9 @@ func osUpgrade(c *cli.Context) {
if c.Args().Present() {
log.Fatalf("invalid arguments %v", c.Args())
}
startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot"))
if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot")); err != nil {
log.Fatal(err)
}
}
func osVersion(c *cli.Context) {
@ -164,22 +168,28 @@ func yes(in *bufio.Reader, question string) bool {
return strings.ToLower(line[0:1]) == "y"
}
func startUpgradeContainer(image string, stage, force, reboot bool) {
func startUpgradeContainer(image string, stage, force, reboot bool) error {
in := bufio.NewReader(os.Stdin)
container := docker.NewContainer(config.DOCKER_SYSTEM_HOST, &config.ContainerConfig{
Cmd: "--name=os-upgrade " +
"--log-driver=json-file " +
"--rm " +
"--privileged " +
"--net=host " +
image + " " +
"-t rancher-upgrade " +
"-r " + config.VERSION,
}).Stage()
container, err := compose.CreateService(nil, "os-upgrade", &project.ServiceConfig{
LogDriver: "json-file",
Privileged: true,
Net: "host",
Image: image,
Labels: project.NewSliceorMap(map[string]string{
config.SCOPE: config.SYSTEM,
}),
Command: project.NewCommand(
"-t", "rancher-upgrade",
"-r", config.VERSION,
),
})
if err != nil {
return err
}
if container.Err != nil {
log.Fatal(container.Err)
if err := container.Pull(); err != nil {
return err
}
if !stage {
@ -191,46 +201,25 @@ func startUpgradeContainer(image string, stage, force, reboot bool) {
}
}
container.Start()
if container.Err != nil {
log.Fatal(container.Err)
if err := container.Start(); err != nil {
return err
}
client, err := docker.NewClient(config.DOCKER_SYSTEM_HOST)
if err != nil {
log.Fatal(err)
if err := container.Log(); err != nil {
return err
}
go func() {
client.Logs(dockerClient.LogsOptions{
Container: container.Container.ID,
OutputStream: os.Stdout,
ErrorStream: os.Stderr,
Follow: true,
Stdout: true,
Stderr: true,
})
}()
exit, err := client.WaitContainer(container.Container.ID)
if err != nil {
log.Fatal(err)
if err := container.Up(); err != nil {
return err
}
if container.Err != nil {
log.Fatal(container.Err)
}
if exit == 0 {
if reboot && (force || yes(in, "Continue with reboot")) {
log.Info("Rebooting")
power.Reboot()
}
} else {
log.Error("Upgrade failed")
os.Exit(exit)
}
}
return nil
}
func parseBody(body []byte) (*Images, error) {

View File

@ -1,53 +0,0 @@
package control
import (
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/rancherio/os/config"
"github.com/rancherio/os/docker"
)
//func parseContainers(cfg *config.Config) map[string]*docker.Container {
// result := map[string]*docker.Container{}
//
// for _, containerConfig := range cfg.SystemContainers {
// container := docker.NewContainer(config.DOCKER_SYSTEM_HOST, &containerConfig)
// if containerConfig.Id != "" {
// result[containerConfig.Id] = container
// }
// }
//
// return result
//}
// reload reloads the OS configuration and restarts the system containers
// named on the command line. It is currently a stub: the container map is
// always empty (see the commented-out parseContainers above), so no
// container is actually started.
//
// Exits the process via log.Fatal on a configuration load error or if any
// requested container fails to start.
func reload(c *cli.Context) {
	_, err := config.LoadConfig()
	if err != nil {
		log.Fatal(err)
	}

	containers := map[string]*docker.Container{} //parseContainers(cfg)

	toStart := make([]*docker.Container, 0, len(c.Args()))
	for _, id := range c.Args() {
		if container, ok := containers[id]; ok {
			toStart = append(toStart, container.Stage())
		}
	}

	var firstErr error
	for _, c := range toStart {
		err := c.Start().Err
		if err != nil {
			log.Errorf("Failed to start %s : %v", c.ContainerCfg.Id, err)
			// BUG FIX: remember the FIRST error. The original tested
			// firstErr != nil, which could never record anything.
			if firstErr == nil {
				firstErr = err
			}
		}
	}

	if firstErr != nil {
		log.Fatal(firstErr)
	}
}

View File

@ -6,8 +6,8 @@ import (
"strings"
"github.com/codegangsta/cli"
"github.com/rancherio/os/compose"
"github.com/rancherio/os/config"
"github.com/rancherio/os/docker"
"github.com/rancherio/os/util"
)
@ -93,7 +93,7 @@ func enable(c *cli.Context) {
if strings.HasPrefix(service, "/") && !strings.HasPrefix(service, "/var/lib/rancher/conf") {
log.Fatalf("ERROR: Service should be in path /var/lib/rancher/conf")
}
if _, err := docker.LoadServiceResource(service, true, cfg); err != nil {
if _, err := compose.LoadServiceResource(service, true, cfg); err != nil {
log.Fatalf("could not load service %s", service)
}
cfg.Rancher.ServicesInclude[service] = true

162
compose/project.go Normal file
View File

@ -0,0 +1,162 @@
package compose
import (
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/cli/logger"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/config"
rosDocker "github.com/rancherio/os/docker"
"github.com/rancherio/os/util"
)
// CreateService builds a one-off libcompose service called name from
// serviceConfig. Passing a nil cfg loads the current cloud config first.
func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
	if cfg == nil {
		loaded, err := config.LoadConfig()
		if err != nil {
			return nil, err
		}
		cfg = loaded
	}

	configs := map[string]*project.ServiceConfig{name: serviceConfig}

	proj, err := RunServiceSet("once", cfg, configs)
	if err != nil {
		return nil, err
	}

	return proj.CreateService(name)
}
// RunServiceSet creates a project named name containing the given service
// configs, brings it up, and returns it.
func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
	proj, err := newProject(name, cfg)
	if err != nil {
		return nil, err
	}

	addServices(proj, cfg, map[string]string{}, configs)

	return proj, proj.Up()
}
// RunServices brings up the long-lived core system services project.
func RunServices(cfg *config.CloudConfig) error {
	proj, err := newCoreServiceProject(cfg)
	if err != nil {
		return err
	}

	return proj.Up()
}
// newProject constructs a libcompose docker project wired with RancherOS
// specifics: a client factory that routes each service to the system or
// user Docker daemon, an environment lookup backed by the cloud config,
// and a service factory that tracks inter-service dependencies.
func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) {
	clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{})
	if err != nil {
		return nil, err
	}

	serviceFactory := &rosDocker.ServiceFactory{
		Deps: map[string][]string{},
	}
	context := &docker.Context{
		ClientFactory: clientFactory,
		Context: project.Context{
			ProjectName:       name,
			EnvironmentLookup: rosDocker.NewConfigEnvironment(cfg),
			ServiceFactory:    serviceFactory,
			Rebuild:           true,
			Log:               cfg.Rancher.Log,
			LoggerFactory:     logger.NewColorLoggerFactory(),
		},
	}
	// The factory needs the context back so services it creates can reach
	// the project; set after construction to break the cycle.
	serviceFactory.Context = context

	return docker.NewProject(context)
}
// addServices registers each service in configs with the project,
// skipping services whose config hash is already recorded in enabled.
// Load failures are logged and skipped so one broken service does not
// block the rest. enabled is updated in place (service name -> hash).
func addServices(p *project.Project, cfg *config.CloudConfig, enabled map[string]string, configs map[string]*project.ServiceConfig) {
	// Note: we ignore errors while loading services
	for name, serviceConfig := range configs {
		// BUG FIX: iterate the configs argument rather than
		// cfg.Rancher.Services; the parameter was otherwise unused and
		// callers such as RunServiceSet pass a specific service set.
		hash := project.GetServiceHash(name, *serviceConfig)

		if enabled[name] == hash {
			continue
		}

		if err := p.AddConfig(name, serviceConfig); err != nil {
			log.Infof("Failed loading service %s", name)
			continue
		}

		enabled[name] = hash
	}
}
// newCoreServiceProject builds the long-lived "os" project that runs the
// core system services. Its ReloadCallback re-reads the cloud config,
// loads newly enabled service includes (deferring network-dependent ones
// until networking is up), and (re-)adds the built-in services.
func newCoreServiceProject(cfg *config.CloudConfig) (*project.Project, error) {
	network := false
	projectEvents := make(chan project.ProjectEvent)
	// enabled tracks services already registered so reloads are idempotent.
	enabled := make(map[string]string)

	p, err := newProject("os", cfg)
	if err != nil {
		return nil, err
	}

	p.AddListener(project.NewDefaultListener(p))
	p.AddListener(projectEvents)

	p.ReloadCallback = func() error {
		err := cfg.Reload()
		if err != nil {
			return err
		}

		for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
			if enabled[service] != "" || !serviceEnabled {
				continue
			}

			bytes, err := LoadServiceResource(service, network, cfg)
			if err != nil {
				if err == util.ErrNoNetwork {
					log.Debugf("Can not load %s, networking not enabled", service)
				} else {
					log.Errorf("Failed to load %s : %v", service, err)
				}
				continue
			}

			err = p.Load(bytes)
			if err != nil {
				log.Errorf("Failed to load %s : %v", service, err)
				continue
			}

			// Mark as loaded; only a non-empty value is needed here
			// (addServices stores hashes, this stores the name).
			enabled[service] = service
		}

		addServices(p, cfg, enabled, cfg.Rancher.Services)

		return nil
	}

	go func() {
		// Flip the network flag once the "network" service's container
		// starts. NOTE(review): network is read by the reload callback
		// from another goroutine without synchronization — looks like a
		// benign race, but confirm.
		for event := range projectEvents {
			if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" {
				network = true
			}
		}
	}()

	// Prime the project with the initial configuration.
	err = p.ReloadCallback()
	if err != nil {
		log.Errorf("Failed to reload os: %v", err)
		return nil, err
	}

	return p, nil
}
// LoadServiceResource fetches the composition for the named service from
// the configured repositories. network gates remote lookups — presumably
// util.LoadResource returns util.ErrNoNetwork when it is false and a
// remote fetch would be needed (see newCoreServiceProject).
func LoadServiceResource(name string, network bool, cfg *config.CloudConfig) ([]byte, error) {
	return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray())
}

View File

@ -5,8 +5,8 @@ import (
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
"gopkg.in/yaml.v2"
)
@ -139,18 +139,6 @@ func Dump(private, full bool) (string, error) {
return string(bytes), err
}
// configureConsole syncs the console service's remove-label with the
// persistence setting: a persistent console keeps its container across
// restarts (remove = "false"), a non-persistent one does not.
func (c *CloudConfig) configureConsole() error {
	console, ok := c.Rancher.Services[CONSOLE_CONTAINER]
	if !ok {
		return nil
	}

	remove := "true"
	if c.Rancher.Console.Persistent {
		remove = "false"
	}
	console.Labels.MapParts()[REMOVE] = remove

	return nil
}
func (c *CloudConfig) amendNils() error {
if c.Rancher.Environment == nil {
c.Rancher.Environment = map[string]string{}
@ -173,7 +161,6 @@ func (c *CloudConfig) amendNils() error {
func (c *CloudConfig) readGlobals() error {
return util.ShortCircuit(
c.readCmdline,
c.configureConsole, // TODO: this smells (it is a write hidden inside a read)
)
}
@ -216,27 +203,6 @@ func (c *CloudConfig) Set(key string, value interface{}) error {
return c.Reload()
}
// BridgeConfig scans the daemon arguments (Args followed by ExtraArgs)
// for the bridge name (-b) and fixed CIDR (--fixed-cidr). It returns
// empty strings when no bridge is configured or the bridge is explicitly
// "none". When a flag appears more than once the last occurrence wins.
func (d *DockerConfig) BridgeConfig() (string, string) {
	var name, cidr string

	// BUG FIX: copy before combining. append(d.Args, d.ExtraArgs...)
	// can write into d.Args' backing array when it has spare capacity,
	// mutating the shared config.
	args := make([]string, 0, len(d.Args)+len(d.ExtraArgs))
	args = append(args, d.Args...)
	args = append(args, d.ExtraArgs...)

	for i, opt := range args {
		if i == len(args)-1 {
			// A flag in the final position has no value to read.
			break
		}
		switch opt {
		case "-b":
			name = args[i+1]
		case "--fixed-cidr":
			cidr = args[i+1]
		}
	}

	if name == "" || name == "none" {
		return "", ""
	}
	return name, cidr
}
func (r Repositories) ToArray() []string {
result := make([]string, 0, len(r))
for _, repo := range r {

View File

@ -2,12 +2,11 @@ package config
import (
"github.com/coreos/coreos-cloudinit/config"
"github.com/docker/libcompose/project"
"github.com/rancher/netconf"
"github.com/rancherio/rancher-compose/librcompose/project"
)
const (
CONSOLE_CONTAINER = "console"
DOCKER_BIN = "/usr/bin/docker"
ROS_BIN = "/usr/bin/ros"
SYSINIT_BIN = "/usr/bin/ros-sysinit"
@ -24,7 +23,6 @@ const (
HASH = "io.rancher.os.hash"
ID = "io.rancher.os.id"
DETACH = "io.rancher.os.detach"
REMOVE = "io.rancher.os.remove"
CREATE_ONLY = "io.rancher.os.createonly"
RELOAD_CONFIG = "io.rancher.os.reloadconfig"
SCOPE = "io.rancher.os.scope"
@ -73,8 +71,8 @@ type RancherConfig struct {
Autoformat map[string]*project.ServiceConfig `yaml:"autoformat,omitempty"`
BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
CloudInit CloudInit `yaml:"cloud_init,omitempty"`
Console ConsoleConfig `yaml:"console,omitempty"`
Debug bool `yaml:"debug,omitempty"`
Log bool `yaml:"log,omitempty"`
Disable []string `yaml:"disable,omitempty"`
ServicesInclude map[string]bool `yaml:"services_include,omitempty"`
Modules []string `yaml:"modules,omitempty"`
@ -84,15 +82,9 @@ type RancherConfig struct {
State StateConfig `yaml:"state,omitempty"`
SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
UserContainers []ContainerConfig `yaml:"user_containers,omitempty"`
UserDocker DockerConfig `yaml:"user_docker,omitempty"`
}
type ConsoleConfig struct {
Tail bool `yaml:"tail,omitempty"`
Persistent bool `yaml:"persistent,omitempty"`
}
type UpgradeConfig struct {
Url string `yaml:"url,omitempty"`
Image string `yaml:"image,omitempty"`

View File

@ -1,10 +1,6 @@
package docker
import (
"time"
log "github.com/Sirupsen/logrus"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancherio/os/config"
)
@ -28,25 +24,10 @@ func NewClient(endpoint string) (*dockerClient.Client, error) {
return nil, err
}
retry := false
for i := 0; i < (MAX_WAIT / INTERVAL); i++ {
_, err = client.Info()
if err == nil {
break
}
err = ClientOK(endpoint, func() bool {
_, err := client.Info()
return err == nil
})
retry = true
log.Infof("Waiting for Docker at %s", endpoint)
time.Sleep(INTERVAL * time.Millisecond)
}
if err != nil {
return nil, err
}
if retry {
log.Infof("Connected to Docker at %s", endpoint)
}
return client, nil
return client, err
}

94
docker/client_factory.go Normal file
View File

@ -0,0 +1,94 @@
package docker
import (
"fmt"
"sync"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/docker/machine/log"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/samalba/dockerclient"
)
// ClientFactory hands out Docker clients for libcompose services,
// choosing between the system daemon and the user daemon based on the
// service's scope label. Each endpoint is health-checked lazily, exactly
// once, via the sync.Once fields.
type ClientFactory struct {
	userClient   dockerclient.Client
	systemClient dockerclient.Client
	userOnce     sync.Once
	systemOnce   sync.Once
}
// NewClientFactory builds a ClientFactory with one client per daemon,
// cloning opts and overriding only the host for each endpoint.
func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) {
	newClient := func(host string) (dockerclient.Client, error) {
		hostOpts := opts
		hostOpts.Host = host
		return docker.CreateClient(hostOpts)
	}

	user, err := newClient(config.DOCKER_HOST)
	if err != nil {
		return nil, err
	}

	system, err := newClient(config.DOCKER_SYSTEM_HOST)
	if err != nil {
		return nil, err
	}

	return &ClientFactory{
		userClient:   user,
		systemClient: system,
	}, nil
}
// Create returns the Docker client for the given service, blocking
// (once per endpoint) until the corresponding daemon answers.
func (c *ClientFactory) Create(service project.Service) dockerclient.Client {
	once, client, endpoint := &c.userOnce, c.userClient, config.DOCKER_HOST
	if IsSystemContainer(service.Config()) {
		once, client, endpoint = &c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST
	}

	waitFor(once, client, endpoint)
	return client
}
// waitFor blocks until the Docker daemon at endpoint answers an Info
// call, using once so the (potentially slow) health check runs a single
// time per endpoint. Panics if the daemon never becomes ready, aborting
// the calling service operation.
func waitFor(once *sync.Once, client dockerclient.Client, endpoint string) {
	once.Do(func() {
		err := ClientOK(endpoint, func() bool {
			_, err := client.Info()
			return err == nil
		})
		if err != nil {
			panic(err.Error())
		}
	})
}
// ClientOK polls test under a backoff schedule until it reports success,
// logging progress along the way. It returns an error if the backoff
// window expires before the Docker daemon at endpoint answers.
func ClientOK(endpoint string, test func() bool) error {
	backoff := util.Backoff{}
	defer backoff.Close()

	waited := false
	for ok := range backoff.Start() {
		if !ok {
			return fmt.Errorf("Timeout waiting for Docker at %s", endpoint)
		}
		if test() {
			if waited {
				log.Infof("Connected to Docker at %s", endpoint)
			}
			return nil
		}
		waited = true
		log.Infof("Waiting for Docker at %s", endpoint)
	}

	// Backoff channel closed without signaling a timeout.
	if waited {
		log.Infof("Connected to Docker at %s", endpoint)
	}
	return nil
}

View File

@ -1,632 +0,0 @@
package docker
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
log "github.com/Sirupsen/logrus"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/runconfig"
shlex "github.com/flynn/go-shlex"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/docker"
"github.com/rancherio/rancher-compose/librcompose/project"
)
// Container wraps a single system/user container managed outside of
// libcompose. Methods use an error-carrying fluent style: each operation
// records its failure in Err and returns the receiver, so calls can be
// chained and Err checked once at the end.
type Container struct {
	Err          error                   // first error from a chained operation
	Name         string                  // container name (from --name or the service name)
	remove       bool                    // delete any existing container before starting (--rm)
	detach       bool                    // do not wait for the container to exit (-d)
	Config       *runconfig.Config       // parsed Docker container config
	HostConfig   *runconfig.HostConfig   // parsed Docker host config
	dockerHost   string                  // Docker endpoint this container targets
	Container    *dockerClient.Container // inspected container, populated by Lookup
	ContainerCfg *config.ContainerConfig // source RancherOS configuration
}
// ByCreated sorts API containers newest-first (note the reversed operands
// in Less).
type ByCreated []dockerClient.APIContainers

func (c ByCreated) Len() int           { return len(c) }
func (c ByCreated) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c ByCreated) Less(i, j int) bool { return c[j].Created < c[i].Created }
// getHash returns a stable hex-encoded SHA-1 fingerprint of a container
// configuration, used to detect when a running container no longer
// matches its desired config. Map-like values are written in sorted key
// order so the result is independent of map iteration order; list-like
// values whose order is significant (commands, env, links, …) are written
// in their given order.
func getHash(containerCfg *config.ContainerConfig) string {
	hash := sha1.New()

	io.WriteString(hash, fmt.Sprintln(containerCfg.Id))
	io.WriteString(hash, fmt.Sprintln(containerCfg.Cmd))
	io.WriteString(hash, fmt.Sprintln(containerCfg.MigrateVolumes))
	io.WriteString(hash, fmt.Sprintln(containerCfg.ReloadConfig))
	io.WriteString(hash, fmt.Sprintln(containerCfg.CreateOnly))

	if containerCfg.Service != nil {
		// Walk the Service struct via reflection so every field
		// participates in the hash.
		val := reflect.ValueOf(containerCfg.Service).Elem()

		// Sort field names so the hash ordering is constant.
		serviceKeys := []string{}
		unsortedKeyValue := make(map[string]interface{})

		for i := 0; i < val.NumField(); i++ {
			valueField := val.Field(i)
			keyField := val.Type().Field(i)

			serviceKeys = append(serviceKeys, keyField.Name)
			unsortedKeyValue[keyField.Name] = valueField.Interface()
		}

		sort.Strings(serviceKeys)

		for _, serviceKey := range serviceKeys {
			serviceValue := unsortedKeyValue[serviceKey]
			io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey))

			switch s := serviceValue.(type) {
			case project.SliceorMap:
				hashSortedMap(hash, s.MapParts())
			case project.MaporEqualSlice:
				hashOrderedSlice(hash, s.Slice())
			case project.MaporColonSlice:
				hashOrderedSlice(hash, s.Slice())
			case project.MaporSpaceSlice:
				hashOrderedSlice(hash, s.Slice())
			case project.Command:
				hashOrderedSlice(hash, s.Slice())
			case project.Stringorslice:
				hashSortedSlice(hash, s.Slice())
			case []string:
				hashSortedSlice(hash, s)
			default:
				io.WriteString(hash, fmt.Sprintf("%v", serviceValue))
			}
		}
	}

	return hex.EncodeToString(hash.Sum(nil))
}

// hashSortedMap writes a label map in sorted key order, skipping the
// injected hash label itself (config.HASH — previously a hardcoded
// duplicate of that constant) so re-hashing a labeled container is stable.
func hashSortedMap(w io.Writer, m map[string]string) {
	keys := []string{}
	for key := range m {
		if key != config.HASH {
			keys = append(keys, key)
		}
	}
	sort.Strings(keys)
	for _, key := range keys {
		io.WriteString(w, fmt.Sprintf("%s=%v, ", key, m[key]))
	}
}

// hashOrderedSlice writes values as given; order matters for these fields.
func hashOrderedSlice(w io.Writer, values []string) {
	for _, value := range values {
		io.WriteString(w, fmt.Sprintf("%s, ", value))
	}
}

// hashSortedSlice writes values in sorted order. BUG FIX: sort a copy —
// the original sorted the caller's slice in place, silently reordering
// fields such as Volumes on the live service config.
func hashSortedSlice(w io.Writer, values []string) {
	sorted := append([]string{}, values...)
	sort.Strings(sorted)
	for _, value := range sorted {
		io.WriteString(w, fmt.Sprintf("%s, ", value))
	}
}
// StartAndWait runs the configured container on dockerHost and blocks
// until it exits, returning any accumulated error.
func StartAndWait(dockerHost string, containerCfg *config.ContainerConfig) error {
	return NewContainer(dockerHost, containerCfg).start(false, true).Err
}
// NewContainerFromService builds a Container for dockerHost from a
// libcompose service definition and parses it immediately.
func NewContainerFromService(dockerHost string, name string, service *project.ServiceConfig) *Container {
	container := &Container{
		Name:       name,
		dockerHost: dockerHost,
		ContainerCfg: &config.ContainerConfig{
			Id:      name,
			Service: service,
		},
	}

	return container.Parse()
}
// NewContainer builds a Container for dockerHost from a raw container
// config and parses it immediately.
func NewContainer(dockerHost string, containerCfg *config.ContainerConfig) *Container {
	return (&Container{
		dockerHost:   dockerHost,
		ContainerCfg: containerCfg,
	}).Parse()
}
// returnErr records err on the container and returns the receiver,
// keeping the fluent chaining style used throughout this file.
func (c *Container) returnErr(err error) *Container {
	c.Err = err
	return c
}
// getByLabel returns the newest container carrying the label key=value,
// or nil when no such container exists.
func getByLabel(client *dockerClient.Client, key, value string) (*dockerClient.APIContainers, error) {
	listOpts := dockerClient.ListContainersOptions{
		All: true,
		Filters: map[string][]string{
			config.LABEL: {fmt.Sprintf("%s=%s", key, value)},
		},
	}

	containers, err := client.ListContainers(listOpts)
	switch {
	case err != nil:
		return nil, err
	case len(containers) == 0:
		return nil, nil
	}

	// Newest first; return the most recently created match.
	sort.Sort(ByCreated(containers))
	return &containers[0], nil
}
// Lookup finds the existing Docker container whose config-hash label
// matches this configuration and caches the inspected result in
// c.Container. No-op when an inspection (with host config) is already
// cached or a previous chained step failed.
func (c *Container) Lookup() *Container {
	c.Parse()

	if c.Err != nil || (c.Container != nil && c.Container.HostConfig != nil) {
		return c
	}

	hash := getHash(c.ContainerCfg)

	client, err := NewClient(c.dockerHost)
	if err != nil {
		return c.returnErr(err)
	}

	containers, err := client.ListContainers(dockerClient.ListContainersOptions{
		All: true,
		Filters: map[string][]string{
			config.LABEL: {fmt.Sprintf("%s=%s", config.HASH, hash)},
		},
	})
	if err != nil {
		return c.returnErr(err)
	}

	if len(containers) == 0 {
		return c
	}

	c.Container, c.Err = inspect(client, containers[0].ID)

	return c
}
// inspect fetches full details for container id, normalizing the name by
// stripping Docker's leading slash.
func inspect(client *dockerClient.Client, id string) (*dockerClient.Container, error) {
	container, err := client.InspectContainer(id)
	if err != nil {
		return nil, err
	}

	container.Name = strings.TrimPrefix(container.Name, "/")
	return container, nil
}
// Exists reports whether a matching container is present on the daemon,
// performing a Lookup as a side effect.
func (c *Container) Exists() bool {
	c.Lookup()
	return c.Container != nil
}
// Reset clears all cached parse/lookup state and any recorded error so
// the container can be re-parsed and re-looked-up from scratch.
func (c *Container) Reset() *Container {
	c.Config = nil
	c.HostConfig = nil
	c.Container = nil
	c.Err = nil
	return c
}
// requiresSyslog reports whether the service logs via syslog — the
// default when no log driver is configured.
func (c *Container) requiresSyslog() bool {
	return (c.ContainerCfg.Service.LogDriver == "" || c.ContainerCfg.Service.LogDriver == "syslog")
}
// requiresUserDocker reports whether this container targets the user
// Docker daemon rather than the system one.
func (c *Container) requiresUserDocker() bool {
	return c.dockerHost == config.DOCKER_HOST
}
// hasLink reports whether the service already declares a link to link.
func (c *Container) hasLink(link string) bool {
	return util.Contains(c.ContainerCfg.Service.Links.Slice(), link)
}
// addLink appends a link to the service's link list, skipping duplicates.
func (c *Container) addLink(link string) {
	if c.hasLink(link) {
		return
	}

	log.Debugf("Adding %s link to %s", link, c.Name)
	c.ContainerCfg.Service.Links = project.NewMaporColonSlice(append(c.ContainerCfg.Service.Links.Slice(), link))
}
// parseService converts the libcompose service config into Docker
// create/start configs, injecting implicit links (syslog for syslog
// logging, dockerwait for user-daemon containers, network when the image
// still needs pulling) and lifting behavior flags out of labels.
func (c *Container) parseService() {
	if c.requiresSyslog() {
		c.addLink("syslog")
	}

	if c.requiresUserDocker() {
		c.addLink("dockerwait")
	} else if c.ContainerCfg.Service.Image != "" {
		client, err := NewClient(c.dockerHost)
		if err != nil {
			c.Err = err
			return
		}

		// Inspect errors are deliberately ignored: a missing image just
		// means networking is needed to pull it.
		i, _ := client.InspectImage(c.ContainerCfg.Service.Image)
		if i == nil {
			c.addLink("network")
		}
	}

	cfg, hostConfig, err := docker.Convert(c.ContainerCfg.Service)
	if err != nil {
		c.Err = err
		return
	}

	c.Config = cfg
	c.HostConfig = hostConfig

	// Labels default to detach+remove unless explicitly set to "false";
	// create-only and reload-config must be explicitly "true".
	c.detach = c.Config.Labels[config.DETACH] != "false"
	c.remove = c.Config.Labels[config.REMOVE] != "false"
	c.ContainerCfg.CreateOnly = c.Config.Labels[config.CREATE_ONLY] == "true"
	c.ContainerCfg.ReloadConfig = c.Config.Labels[config.RELOAD_CONFIG] == "true"
}
// parseCmd parses the docker-run style Cmd string into Docker configs,
// capturing --rm, -d/--detach, and --name into the container's fields.
func (c *Container) parseCmd() {
	flags := flag.NewFlagSet("run", flag.ExitOnError)

	flRemove := flags.Bool([]string{"#rm", "-rm"}, false, "")
	flDetach := flags.Bool([]string{"d", "-detach"}, false, "")
	flName := flags.String([]string{"#name", "-name"}, "", "")

	args, err := shlex.Split(c.ContainerCfg.Cmd)
	if err != nil {
		c.Err = err
		return
	}

	log.Debugf("Parsing [%s]", strings.Join(args, ","))
	c.Config, c.HostConfig, _, c.Err = runconfig.Parse(flags, args)

	c.Name = *flName
	c.detach = *flDetach
	c.remove = *flRemove
}
// Parse populates Config/HostConfig from either the raw Cmd string or
// the service definition (exactly one must be set), defaulting the
// config Id to the container name. Idempotent: returns immediately when
// already parsed or errored.
func (c *Container) Parse() *Container {
	if c.Config != nil || c.Err != nil {
		return c
	}

	if len(c.ContainerCfg.Cmd) > 0 {
		c.parseCmd()
	} else if c.ContainerCfg.Service != nil {
		c.parseService()
	} else {
		c.Err = errors.New("Cmd or Service must be set")
		return c
	}

	if c.ContainerCfg.Id == "" {
		c.ContainerCfg.Id = c.Name
	}

	return c
}
// Create stages and creates the container without starting it.
func (c *Container) Create() *Container {
	return c.start(true, false)
}
// Start launches the container without waiting for it to exit.
func (c *Container) Start() *Container {
	return c.start(false, false)
}
// StartAndWait launches the container and blocks until it exits.
func (c *Container) StartAndWait() *Container {
	return c.start(false, true)
}
// Stage ensures the container's image is available locally, pulling it
// (tagged :latest when untagged) only when the daemon reports it missing.
func (c *Container) Stage() *Container {
	c.Parse()

	if c.Err != nil {
		return c
	}

	client, err := NewClient(c.dockerHost)
	if err != nil {
		c.Err = err
		return c
	}

	_, err = client.InspectImage(c.Config.Image)
	if err == dockerClient.ErrNoSuchImage {
		toPull := c.Config.Image
		_, tag := parsers.ParseRepositoryTag(toPull)
		if tag == "" {
			toPull += ":latest"
		}
		c.Err = client.PullImage(dockerClient.PullImageOptions{
			Repository:   toPull,
			OutputStream: os.Stdout,
		}, dockerClient.AuthConfiguration{})
	} else if err != nil {
		log.Errorf("Failed to stage: %s: %v", c.Config.Image, err)
		c.Err = err
	}

	return c
}
// Delete force-removes the existing container matching this config, if
// any. Parse/Stage/Lookup run first so c.Container is resolved.
func (c *Container) Delete() *Container {
	c.Parse()
	c.Stage()
	c.Lookup()

	if c.Err != nil {
		return c
	}

	if !c.Exists() {
		return c
	}

	client, err := NewClient(c.dockerHost)
	if err != nil {
		return c.returnErr(err)
	}

	err = client.RemoveContainer(dockerClient.RemoveContainerOptions{
		ID:    c.Container.ID,
		Force: true,
	})
	if err != nil {
		return c.returnErr(err)
	}

	return c
}
// renameCurrent renames the looked-up container to the configured name
// (no-op when unnamed or already matching) and refreshes the cached
// inspection afterwards.
func (c *Container) renameCurrent(client *dockerClient.Client) error {
	if c.Name == "" {
		return nil
	}

	if c.Name == c.Container.Name {
		return nil
	}

	err := client.RenameContainer(dockerClient.RenameContainerOptions{ID: c.Container.ID, Name: c.Name})
	if err != nil {
		return err
	}

	c.Container, err = inspect(client, c.Container.ID)
	return err
}
// renameOld moves aside any other container currently holding the target
// name, stopping it first if running and renaming it to <name>-<hash>
// (or a random suffix when it carries no hash label).
func (c *Container) renameOld(client *dockerClient.Client, opts *dockerClient.CreateContainerOptions) error {
	if len(opts.Name) == 0 {
		return nil
	}

	existing, err := inspect(client, opts.Name)
	if _, ok := err.(*dockerClient.NoSuchContainer); ok {
		return nil
	}

	if err != nil {
		// NOTE(review): inspect errors other than "no such container"
		// are swallowed here (returns nil, not err) — presumably
		// best-effort; confirm this is intentional.
		return nil
	}

	// The name holder is this very container; nothing to move.
	if c.Container != nil && existing.ID == c.Container.ID {
		return nil
	}

	var newName string
	if label, ok := existing.Config.Labels[config.HASH]; ok {
		newName = fmt.Sprintf("%s-%s", existing.Name, label)
	} else {
		newName = fmt.Sprintf("%s-unknown-%s", existing.Name, util.RandSeq(12))
	}

	if existing.State.Running {
		err := client.StopContainer(existing.ID, 2)
		if err != nil {
			return err
		}

		_, err = client.WaitContainer(existing.ID)
		if err != nil {
			return err
		}
	}

	log.Debugf("Renaming %s to %s", existing.Name, newName)
	return client.RenameContainer(dockerClient.RenameContainerOptions{ID: existing.ID, Name: newName})
}
// getCreateOpts converts the parsed container into Docker create options
// via a JSON round-trip (relying on matching field tags between Container
// and CreateContainerOptions), stamping the config-hash and id labels
// that Lookup/getByLabel use later. The client parameter is unused.
func (c *Container) getCreateOpts(client *dockerClient.Client) (*dockerClient.CreateContainerOptions, error) {
	bytes, err := json.Marshal(c)
	if err != nil {
		log.Errorf("Failed to marshall: %v", c)
		return nil, err
	}

	var opts dockerClient.CreateContainerOptions
	err = json.Unmarshal(bytes, &opts)
	if err != nil {
		log.Errorf("Failed to unmarshall: %s", string(bytes))
		return nil, err
	}

	if opts.Config.Labels == nil {
		opts.Config.Labels = make(map[string]string)
	}

	hash := getHash(c.ContainerCfg)

	opts.Config.Labels[config.HASH] = hash
	opts.Config.Labels[config.ID] = c.ContainerCfg.Id

	return &opts, nil
}
// appendVolumesFrom adds the previous container sharing this config's id
// as a volumes-from source when volume migration is requested.
func appendVolumesFrom(client *dockerClient.Client, containerCfg *config.ContainerConfig, opts *dockerClient.CreateContainerOptions) error {
	if !containerCfg.MigrateVolumes {
		return nil
	}

	previous, err := getByLabel(client, config.ID, containerCfg.Id)
	if err != nil {
		return err
	}
	if previous == nil {
		return nil
	}

	// append handles a nil VolumesFrom slice transparently.
	opts.HostConfig.VolumesFrom = append(opts.HostConfig.VolumesFrom, previous.ID)

	return nil
}
// start implements Create/Start/StartAndWait: stage the image, replace
// any stale removable container, create the container if missing,
// optionally start it, and optionally wait for its exit code.
//
// createOnly stops after creation; wait blocks on the exit code unless
// the container is marked detached. Errors accumulate in c.Err.
func (c *Container) start(createOnly, wait bool) *Container {
	log.Debugf("Container: STARTING '%v', createOnly: %v, !detach: %v, wait: %v", c.Name, createOnly, !c.detach, wait)
	c.Lookup()
	c.Stage()

	if c.Err != nil {
		return c
	}

	client, err := NewClient(c.dockerHost)
	if err != nil {
		return c.returnErr(err)
	}

	created := false

	opts, err := c.getCreateOpts(client)
	if err != nil {
		log.Errorf("Failed to create container create options: %v", err)
		return c.returnErr(err)
	}

	// A removable existing container is deleted and re-looked-up so it
	// gets recreated below from the current configuration.
	if c.Exists() && c.remove {
		log.Debugf("Deleting container %s", c.Container.ID)
		c.Delete()

		if c.Err != nil {
			return c
		}

		c.Reset().Lookup()
		if c.Err != nil {
			return c
		}
	}

	if !c.Exists() {
		err = c.renameOld(client, opts)
		if err != nil {
			return c.returnErr(err)
		}

		err := appendVolumesFrom(client, c.ContainerCfg, opts)
		if err != nil {
			return c.returnErr(err)
		}

		c.Container, err = client.CreateContainer(*opts)
		created = true
		if err != nil {
			return c.returnErr(err)
		}
	}

	// Freshly created containers have not been inspected yet, so take
	// the host config from the create options instead.
	hostConfig := c.Container.HostConfig
	if created {
		hostConfig = opts.HostConfig
	}

	if createOnly {
		return c
	}

	if !c.Container.State.Running {
		if !created {
			err = c.renameOld(client, opts)
			if err != nil {
				return c.returnErr(err)
			}
		}

		err = c.renameCurrent(client)
		if err != nil {
			return c.returnErr(err)
		}

		err = client.StartContainer(c.Container.ID, hostConfig)
		if err != nil {
			log.Errorf("Error from Docker %s", err)
			return c.returnErr(err)
		}
	}

	log.Debugf("Container: WAIT? '%v' !c.detach && wait: %v", c.Name, !c.detach && wait)
	if !c.detach && wait {
		var exitCode int
		exitCode, c.Err = client.WaitContainer(c.Container.ID)
		log.Debugf("Container: FINISHED '%v', exitCode: %v", c.Name, exitCode)
		if exitCode != 0 {
			// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
			c.Err = fmt.Errorf("Container %s exited with code %d", c.Name, exitCode)
		}
		return c
	}

	return c
}

View File

@ -1,307 +0,0 @@
package docker
import (
"fmt"
"strings"
"testing"
"github.com/rancherio/os/config"
"github.com/rancherio/rancher-compose/librcompose/project"
"github.com/stretchr/testify/require"
dockerClient "github.com/fsouza/go-dockerclient"
"os"
)
// testDockerHost checks DOCKER_HOST agrees with the environment.
// Lowercase name: deliberately not picked up by `go test` (needs a live
// environment).
func testDockerHost(t *testing.T) {
	assert := require.New(t)
	assert.Equal(os.Getenv("DOCKER_HOST"), config.DOCKER_HOST)
}
// TestHash pins the hash of a known config and checks that distinct
// configs hash differently.
func TestHash(t *testing.T) {
	assert := require.New(t)

	hash := getHash(&config.ContainerConfig{
		Id:  "id",
		Cmd: "1 2 3",
	})

	hash2 := getHash(&config.ContainerConfig{
		Id:  "id2",
		Cmd: "1 2 3",
	})

	hash3 := getHash(&config.ContainerConfig{
		Id:  "id3",
		Cmd: "1 2 3 4",
	})

	// Golden value: changes to getHash's serialization will break this.
	assert.Equal("d601444333c7fb4cb955bcca36c5ed59b6fa8c3f", hash, "")
	assert.NotEqual(hash, hash2, "")
	assert.NotEqual(hash2, hash3, "")
	assert.NotEqual(hash, hash3, "")
}
// TestHash2 hashes a fully populated service config repeatedly to verify
// the hash is deterministic (guards against map-iteration-order leaks).
func TestHash2(t *testing.T) {
	assert := require.New(t)

	cfg := &config.ContainerConfig{
		Id:             "docker-volumes",
		Cmd:            "",
		MigrateVolumes: false,
		ReloadConfig:   false,
		CreateOnly:     true,
		Service: &project.ServiceConfig{
			CapAdd:      nil,
			CapDrop:     nil,
			CpuShares:   0,
			Command:     project.NewCommand(),
			Detach:      "",
			Dns:         project.NewStringorslice(),
			DnsSearch:   project.NewStringorslice(),
			DomainName:  "",
			Entrypoint:  project.NewCommand(),
			EnvFile:     project.NewStringorslice(),
			Environment: project.NewMaporEqualSlice([]string{}),
			Hostname:    "",
			Image:       "state",
			Labels: project.NewSliceorMap(map[string]string{
				"io.rancher.os.createonly": "true",
				"io.rancher.os.scope":      "system"}),
			Links:      project.NewMaporColonSlice(nil),
			LogDriver:  "json-file",
			MemLimit:   0,
			Name:       "",
			Net:        "none",
			Pid:        "",
			Ipc:        "",
			Ports:      nil,
			Privileged: true,
			Restart:    "",
			ReadOnly:   true,
			StdinOpen:  false,
			Tty:        false,
			User:       "",
			Volumes: []string{
				"/var/lib/docker:/var/lib/docker",
				"/var/lib/rancher/conf:/var/lib/rancher/conf",
				"/var/lib/system-docker:/var/lib/system-docker"},
			VolumesFrom:   nil,
			WorkingDir:    "",
			Expose:        nil,
			ExternalLinks: nil},
	}

	for i := 0; i < 1000; i++ {
		assert.Equal(getHash(cfg), getHash(cfg), fmt.Sprintf("Failed at iteration: %v", i))
	}
}
// TestBool2String documents the bool formatting getHash relies on.
func TestBool2String(t *testing.T) {
	assert := require.New(t)
	assert.Equal("true", fmt.Sprint(true), "")
}
// TestParse checks docker-run flag parsing (--name/-d/--rm/--privileged)
// and argument extraction from a raw Cmd string.
func TestParse(t *testing.T) {
	assert := require.New(t)

	cfg := &config.ContainerConfig{
		Cmd: "--name c1 " +
			"-d " +
			"--rm " +
			"--privileged " +
			"test/image " +
			"arg1 " +
			"arg2 ",
	}

	c := NewContainer("", cfg).Parse()

	assert.NoError(c.Err, "")
	assert.Equal(cfg.Id, "c1", "Id doesn't match")
	assert.Equal(c.Name, "c1", "Name doesn't match")
	assert.True(c.remove, "Remove doesn't match")
	assert.True(c.detach, "Detach doesn't match")
	assert.Equal(c.Config.Cmd.Len(), 2, "Args doesn't match")
	assert.Equal(c.Config.Cmd.Slice()[0], "arg1", "Arg1 doesn't match")
	assert.Equal(c.Config.Cmd.Slice()[1], "arg2", "Arg2 doesn't match")
	assert.True(c.HostConfig.Privileged, "Privileged doesn't match")
}
// TestIdFromName checks that NewContainer backfills an empty config Id
// from the parsed --name flag.
func TestIdFromName(t *testing.T) {
	assert := require.New(t)

	cfg := &config.ContainerConfig{
		Cmd: "--name foo -v /test busybox echo hi",
	}

	assert.Equal("", cfg.Id)
	NewContainer(config.DOCKER_HOST, cfg)
	assert.Equal("foo", cfg.Id)
}
// testMigrateVolumes checks that MigrateVolumes carries volumes from the
// previous same-id container. Lowercase name: needs a live Docker daemon.
func testMigrateVolumes(t *testing.T) {
	assert := require.New(t)

	c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		Cmd: "--name foo -v /test busybox echo hi",
	}).Parse().Start().Lookup()
	assert.NoError(c.Err, "")

	test_path, ok := c.Container.Volumes["/test"]
	assert.True(ok, "")

	c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		MigrateVolumes: true,
		Cmd:            "--name foo -v /test2 busybox echo hi",
	}).Parse().Start().Lookup()
	assert.NoError(c2.Err, "")
	assert.True(c2.Container != nil)

	_, ok = c2.Container.Volumes["/test2"]
	assert.True(ok, "")
	assert.Equal(test_path, c2.Container.Volumes["/test"])

	c.Delete()
	c2.Delete()
}
// testRollback checks the rename-aside/rollback flow: a replaced
// container is renamed away and the original can reclaim its name.
// Lowercase name: needs a live Docker daemon.
func testRollback(t *testing.T) {
	assert := require.New(t)

	c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		Cmd: "--name rollback busybox echo hi",
	}).Parse().Start().Lookup()
	assert.NoError(c.Err, "")
	assert.Equal("rollback", c.Container.Name)

	c2 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		Cmd: "--name rollback busybox echo bye",
	}).Parse().Start().Lookup()
	assert.Equal("rollback", c2.Container.Name)
	assert.NoError(c2.Err, "")
	assert.NotEqual(c.Container.ID, c2.Container.ID)

	c3 := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		Cmd: "--name rollback busybox echo hi",
	}).Parse().Start().Lookup()
	assert.NoError(c3.Err, "")
	assert.Equal(c.Container.ID, c3.Container.ID)
	assert.Equal("rollback", c3.Container.Name)

	c2.Reset().Lookup()
	assert.NoError(c2.Err, "")
	assert.True(strings.HasPrefix(c2.Container.Name, "rollback-"))

	c.Delete()
	c2.Delete()
}
// testStart verifies that --pid=host and --privileged from the command
// string end up both in the parsed HostConfig and in the HostConfig of the
// container Docker actually created.
// NOTE(review): lowercase "test" prefix — skipped by `go test`; needs a
// live Docker daemon.
func testStart(t *testing.T) {
	assert := require.New(t)

	containerCfg := &config.ContainerConfig{
		Cmd: "--pid=host --privileged --rm busybox echo hi",
	}
	c := NewContainer(config.DOCKER_HOST, containerCfg).Parse().Start().Lookup()

	assert.NoError(c.Err, "")
	assert.True(c.HostConfig.Privileged, "")
	assert.True(c.Container.HostConfig.Privileged, "")
	assert.Equal("host", c.Container.HostConfig.PidMode, "")

	c.Delete()
}
// testLookup verifies that Lookup resolves a ContainerConfig back to the
// exact container previously started from that same config and does not
// confuse two containers started from different commands.
// NOTE(review): lowercase "test" prefix — skipped by `go test`; needs a
// live Docker daemon.
func testLookup(t *testing.T) {
	assert := require.New(t)
	cfg := &config.ContainerConfig{
		Cmd: "--rm busybox echo hi",
	}
	c := NewContainer(config.DOCKER_HOST, cfg).Parse().Start()
	cfg2 := &config.ContainerConfig{
		Cmd: "--rm busybox echo hi2",
	}
	c2 := NewContainer(config.DOCKER_HOST, cfg2).Parse().Start()
	assert.NoError(c.Err, "")
	assert.NoError(c2.Err, "")
	// Fresh Container objects built from the same configs must find the
	// already-started containers.
	c1Lookup := NewContainer(config.DOCKER_HOST, cfg).Lookup()
	c2Lookup := NewContainer(config.DOCKER_HOST, cfg2).Lookup()
	assert.NoError(c1Lookup.Err, "")
	assert.NoError(c2Lookup.Err, "")
	assert.Equal(c.Container.ID, c1Lookup.Container.ID, "")
	assert.Equal(c2.Container.ID, c2Lookup.Container.ID, "")
	c.Delete()
	c2.Delete()
}
// testDelete walks a container through its lifecycle and checks Exists at
// each step: absent before Start, present after Start (via Reset to clear
// cached lookup state), and absent again after Delete.
// NOTE(review): lowercase "test" prefix — skipped by `go test`; needs a
// live Docker daemon.
func testDelete(t *testing.T) {
	assert := require.New(t)
	c := NewContainer(config.DOCKER_HOST, &config.ContainerConfig{
		Cmd: "--rm busybox echo hi",
	}).Parse()
	assert.False(c.Exists())
	assert.NoError(c.Err, "")
	c.Start()
	assert.NoError(c.Err, "")
	// Reset drops cached state so Exists re-queries the daemon.
	c.Reset()
	assert.NoError(c.Err, "")
	assert.True(c.Exists())
	assert.NoError(c.Err, "")
	c.Delete()
	assert.NoError(c.Err, "")
	c.Reset()
	assert.False(c.Exists())
	assert.NoError(c.Err, "")
}
// testDockerClientNames documents go-dockerclient's name handling: the name
// returned at creation time has no leading slash, InspectContainer returns
// it with a leading "/", and the package-local inspect() helper strips the
// slash again.
// NOTE(review): lowercase "test" prefix — skipped by `go test`; needs a
// live Docker daemon.
func testDockerClientNames(t *testing.T) {
	assert := require.New(t)
	client, err := dockerClient.NewClient(config.DOCKER_HOST)
	assert.NoError(err, "")
	c, err := client.CreateContainer(dockerClient.CreateContainerOptions{
		Name: "foo",
		Config: &dockerClient.Config{
			Image: "ubuntu",
		},
	})
	assert.NoError(err, "")
	assert.Equal("foo", c.Name)
	// Raw inspect reports the name with Docker's leading slash.
	c2, err := client.InspectContainer(c.ID)
	assert.NoError(err, "")
	assert.Equal("/foo", c2.Name)
	// The local inspect() helper normalizes the leading slash away.
	c2, err = inspect(client, c.ID)
	assert.NoError(err, "")
	assert.Equal("foo", c2.Name)
	// Best-effort cleanup; the returned error is deliberately ignored.
	client.RemoveContainer(dockerClient.RemoveContainerOptions{
		ID:    c2.ID,
		Force: true,
	})
}

55
docker/env.go Normal file
View File

@ -0,0 +1,55 @@
package docker
import (
"fmt"
"strings"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/config"
)
// ConfigEnvironment implements libcompose's environment lookup against the
// rancher.environment section of the cloud config.
type ConfigEnvironment struct {
	cfg *config.CloudConfig
}

// NewConfigEnvironment returns a ConfigEnvironment backed by cfg.
func NewConfigEnvironment(cfg *config.CloudConfig) *ConfigEnvironment {
	return &ConfigEnvironment{
		cfg: cfg,
	}
}
// appendEnv appends a "key=value" entry to array. A "service/" qualifier on
// the key (anything up to the first "/") is stripped first, so only the
// bare variable name appears in the result.
func appendEnv(array []string, key, value string) []string {
	if idx := strings.Index(key, "/"); idx != -1 {
		key = key[idx+1:]
	}
	return append(array, fmt.Sprintf("%s=%s", key, value))
}
// lookupKeys resolves keys, in order, against cfg.Rancher.Environment and
// returns the first hit as "KEY=value" pairs. A key ending in "*" is a
// prefix match and may yield several pairs; any "service/" qualifier is
// stripped from the returned names. No match yields an empty slice.
func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
	for _, key := range keys {
		if !strings.HasSuffix(key, "*") {
			// Exact match: return the single entry if present.
			if value, ok := cfg.Rancher.Environment[key]; ok {
				return appendEnv([]string{}, key, value)
			}
			continue
		}

		// Wildcard: collect every environment entry sharing the prefix.
		prefix := strings.TrimSuffix(key, "*")
		matches := []string{}
		for envKey, envValue := range cfg.Rancher.Environment {
			if strings.HasPrefix(envKey, prefix) {
				matches = appendEnv(matches, envKey, envValue)
			}
		}
		if len(matches) > 0 {
			return matches
		}
	}
	return []string{}
}
// Lookup returns the environment entries for key, preferring a
// service-scoped "<serviceName>/<key>" entry over the bare key.
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
	scoped := serviceName + "/" + key
	return lookupKeys(c.cfg, scoped, key)
}

View File

@ -1,103 +0,0 @@
package docker
import (
log "github.com/Sirupsen/logrus"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
)
// ContainerFactory builds container-backed compose services from the
// RancherOS cloud config.
type ContainerFactory struct {
	cfg *config.CloudConfig
}

// containerBasedService adapts a RancherOS Container to the compose
// project.Service interface.
type containerBasedService struct {
	project.EmptyService
	name          string
	project       *project.Project
	container     *Container
	serviceConfig *project.ServiceConfig
	cfg           *config.CloudConfig
}

// NewContainerFactory returns a ContainerFactory backed by cfg.
func NewContainerFactory(cfg *config.CloudConfig) *ContainerFactory {
	return &ContainerFactory{
		cfg: cfg,
	}
}
// Up brings the service's container up, honoring three modes: services
// listed in rancher.disable are only reported as created (nothing runs),
// CreateOnly services are created but not started, and everything else is
// started and waited on. Project listeners are notified before and after,
// and ErrRestart is returned when the service is flagged to reload config.
func (c *containerBasedService) Up() error {
	container := c.container
	containerCfg := c.container.ContainerCfg
	fakeCreate := false
	create := containerCfg.CreateOnly
	// Disabled services are skipped entirely but still emit a CREATED event.
	if util.Contains(c.cfg.Rancher.Disable, c.name) {
		fakeCreate = true
	}
	var event project.Event
	c.project.Notify(project.CONTAINER_STARTING, c.name, map[string]string{})
	if fakeCreate {
		event = project.CONTAINER_CREATED
	} else if create {
		container.Create()
		event = project.CONTAINER_CREATED
	} else {
		container.StartAndWait()
		event = project.CONTAINER_STARTED
	}
	if container.Err != nil {
		log.Errorf("Failed to run %v: %v", containerCfg.Id, container.Err)
	}
	// A successful ReloadConfig service propagates ErrRestart so the
	// project reloads configuration and retries.
	if container.Err == nil && containerCfg.ReloadConfig {
		return project.ErrRestart
	}
	if container.Container != nil {
		c.project.Notify(event, c.name, map[string]string{
			project.CONTAINER_ID: container.Container.ID,
		})
	}
	return container.Err
}
// Config returns the service's compose configuration.
func (c *containerBasedService) Config() *project.ServiceConfig {
	return c.serviceConfig
}

// Name returns the service name.
func (c *containerBasedService) Name() string {
	return c.name
}

// isSystemService reports whether the service carries the
// io.rancher.os.scope=system label and therefore targets system Docker.
func isSystemService(serviceConfig *project.ServiceConfig) bool {
	return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM
}
// Create builds a container-backed service for name. System-scoped services
// (see isSystemService) are pointed at the system Docker daemon; all others
// use the user daemon.
func (c *ContainerFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
	host := config.DOCKER_HOST
	if isSystemService(serviceConfig) {
		host = config.DOCKER_SYSTEM_HOST
	}
	container := NewContainerFromService(host, name, serviceConfig)
	if container.Err != nil {
		return nil, container.Err
	}
	return &containerBasedService{
		name:          name,
		project:       project,
		container:     container,
		serviceConfig: serviceConfig,
		cfg:           c.cfg,
	}, nil
}

159
docker/service.go Normal file
View File

@ -0,0 +1,159 @@
package docker
import (
"fmt"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/docker/machine/log"
"github.com/rancherio/os/config"
"github.com/samalba/dockerclient"
)
// Service wraps libcompose's docker service with RancherOS-specific
// dependency wiring (io.rancher.os.* labels) and container renaming.
type Service struct {
	*docker.Service
	deps    map[string][]string
	context *docker.Context
}

// NewService builds a Service, sharing the factory's accumulated dependency
// map (populated from the before/after labels by ServiceFactory.Create).
func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context) *Service {
	return &Service{
		Service: docker.NewService(name, serviceConfig, context),
		deps:    factory.Deps,
		context: context,
	}
}
// DependentServices augments the compose-declared relationships with
// RancherOS ordering rules: label-declared deps become optional links,
// syslog-logging services link to the syslog service, user-Docker services
// link to cloud-init, and services whose image is not present yet link to
// networking (so the image can be pulled).
func (s *Service) DependentServices() []project.ServiceRelationship {
	rels := s.Service.DependentServices()
	for _, dep := range s.deps[s.Name()] {
		rels = appendLink(rels, dep, true)
	}
	if s.requiresSyslog() {
		rels = appendLink(rels, "syslog", false)
	}
	if s.requiresUserDocker() {
		// Linking to cloud-init is a hack really. The problem is we need to
		// link to something that will trigger a reload.
		rels = appendLink(rels, "cloud-init", false)
	} else if s.missingImage() {
		rels = appendLink(rels, "network", false)
	}
	return rels
}
// missingImage reports whether the service's configured image cannot be
// found by the Docker client (or inspection fails). A service with no image
// configured is never considered missing.
func (s *Service) missingImage() bool {
	image := s.Config().Image
	if image == "" {
		return false
	}
	apiClient := s.context.ClientFactory.Create(s)
	info, err := apiClient.InspectImage(image)
	return err != nil || info == nil
}
// requiresSyslog reports whether the service logs via the syslog driver and
// therefore needs the syslog service up first.
func (s *Service) requiresSyslog() bool {
	return s.Config().LogDriver == "syslog"
}

// requiresUserDocker reports whether the service runs in user Docker (i.e.
// it is not labeled io.rancher.os.scope=system).
func (s *Service) requiresUserDocker() bool {
	return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM
}

// appendLink appends a link-type relationship to name; optional links do
// not require the target service to exist.
func appendLink(deps []project.ServiceRelationship, name string, optional bool) []project.ServiceRelationship {
	rel := project.NewServiceRelationship(name, project.REL_TYPE_LINK)
	rel.Optional = optional
	return append(deps, rel)
}
// Up creates and starts the service, honoring RancherOS labels:
// io.rancher.os.createonly stops after creation, io.rancher.os.detach=false
// blocks until the container exits, and io.rancher.os.reloadconfig triggers
// a project restart via checkReload.
func (s *Service) Up() error {
	labels := s.Config().Labels.MapParts()

	// Create first so the container can be renamed before it is started.
	if err := s.Service.Create(); err != nil {
		return err
	}
	if err := s.rename(); err != nil {
		return err
	}
	if labels[config.CREATE_ONLY] == "true" {
		return s.checkReload(labels)
	}
	if err := s.Service.Up(); err != nil {
		return err
	}
	// Non-detached services block until the container exits.
	if labels[config.DETACH] == "false" {
		if err := s.wait(); err != nil {
			return err
		}
	}
	return s.checkReload(labels)
}
// checkReload returns project.ErrRestart when the service is labeled
// io.rancher.os.reloadconfig=true, signaling the project to reload its
// configuration and restart; otherwise it returns nil.
func (s *Service) checkReload(labels map[string]string) error {
	if labels[config.RELOAD_CONFIG] != "true" {
		return nil
	}
	return project.ErrRestart
}
// Create creates the service's container (without starting it) and then
// renames the container to match the service name if needed.
func (s *Service) Create() error {
	err := s.Service.Create()
	if err == nil {
		err = s.rename()
	}
	return err
}
// getContainer returns a Docker client together with the inspected info of
// the service's first container. Both are nil (with a nil error) when the
// service has no containers yet.
func (s *Service) getContainer() (dockerclient.Client, *dockerclient.ContainerInfo, error) {
	containers, err := s.Service.Containers()
	if err != nil {
		return nil, nil, err
	}
	if len(containers) == 0 {
		return nil, nil, nil
	}
	id, err := containers[0].Id()
	if err != nil {
		return nil, nil, err
	}
	client := s.context.ClientFactory.Create(s)
	info, err := client.InspectContainer(id)
	return client, info, err
}
// wait blocks until the service's container exits, returning nil on a zero
// exit code and an error otherwise. A missing container is not an error.
func (s *Service) wait() error {
	client, info, err := s.getContainer()
	if err != nil || info == nil {
		return err
	}

	status := <-client.Wait(info.Id)
	switch {
	case status.Error != nil:
		return status.Error
	case status.ExitCode != 0:
		return fmt.Errorf("ExitCode %d", status.ExitCode)
	}
	return nil
}
// rename renames the service's container to the service name when the two
// differ. Docker inspect reports names with a leading "/", which is
// stripped before comparing and renaming.
func (s *Service) rename() error {
	client, info, err := s.getContainer()
	if err != nil || info == nil {
		return err
	}
	if len(info.Name) == 0 {
		return nil
	}
	current := info.Name[1:]
	if current == s.Name() {
		return nil
	}
	log.Debugf("Renaming container %s => %s", current, s.Name())
	return client.RenameContainer(current, s.Name())
}

27
docker/service_factory.go Normal file
View File

@ -0,0 +1,27 @@
package docker
import (
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancherio/os/util"
)
// ServiceFactory creates RancherOS services for a compose project and
// records explicit ordering dependencies declared through the
// io.rancher.os.after / io.rancher.os.before labels.
type ServiceFactory struct {
	Context *docker.Context
	Deps    map[string][]string
}

// Create builds the service named name and records its label-declared
// ordering constraints in s.Deps: "after: x,y" makes name depend on x and
// y, while "before: x,y" makes x and y depend on name.
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
	labels := serviceConfig.Labels.MapParts()
	after := labels["io.rancher.os.after"]
	before := labels["io.rancher.os.before"]

	// Guard against a nil Deps map: assigning to an entry of a nil map panics.
	if s.Deps == nil && (after != "" || before != "") {
		s.Deps = map[string][]string{}
	}

	if after != "" {
		s.Deps[name] = append(s.Deps[name], util.TrimSplit(after, ",")...)
	}
	if before != "" {
		for _, dep := range util.TrimSplit(before, ",") {
			s.Deps[dep] = append(s.Deps[dep], name)
		}
	}

	return NewService(s, name, serviceConfig, s.Context), nil
}

View File

@ -1,137 +0,0 @@
package docker
import (
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/rancherio/os/config"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
)
// configEnvironment implements compose environment lookup against the
// rancher.environment section of the cloud config.
type configEnvironment struct {
	cfg *config.CloudConfig
}

// appendEnv appends a "key=value" entry to array, first stripping any
// "service/" qualifier from key.
func appendEnv(array []string, key, value string) []string {
	parts := strings.SplitN(key, "/", 2)
	if len(parts) == 2 {
		key = parts[1]
	}
	return append(array, fmt.Sprintf("%s=%s", key, value))
}

// lookupKeys resolves keys, in order, against cfg.Rancher.Environment and
// returns the first hit as "KEY=value" pairs. A key ending in "*" is a
// prefix match and may yield several pairs; no match returns an empty
// slice.
func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
	for _, key := range keys {
		if strings.HasSuffix(key, "*") {
			result := []string{}
			for envKey, envValue := range cfg.Rancher.Environment {
				keyPrefix := key[:len(key)-1]
				if strings.HasPrefix(envKey, keyPrefix) {
					result = appendEnv(result, envKey, envValue)
				}
			}
			if len(result) > 0 {
				return result
			}
		} else if value, ok := cfg.Rancher.Environment[key]; ok {
			return appendEnv([]string{}, key, value)
		}
	}
	return []string{}
}

// Lookup returns environment entries for key, preferring the service-scoped
// "<serviceName>/<key>" entry over the bare key.
func (c *configEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
	fullKey := fmt.Sprintf("%s/%s", serviceName, key)
	return lookupKeys(c.cfg, fullKey, key)
}
// RunServices loads the given service configs into a compose project named
// name and brings them up. The project's reload callback — invoked once up
// front and again whenever a service returns project.ErrRestart, but only
// for the "system-init" project — re-reads the cloud config and folds in
// newly enabled include services and ad-hoc rancher.services entries. A
// project listener tracks when the "network" service has started so that
// network-backed service resources can then be fetched.
func RunServices(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) error {
	network := false
	projectEvents := make(chan project.ProjectEvent)
	p := project.NewProject(name, NewContainerFactory(cfg))
	p.EnvironmentLookup = &configEnvironment{cfg: cfg}
	p.AddListener(projectEvents)
	enabled := make(map[string]bool)

	for name, serviceConfig := range configs {
		if err := p.AddConfig(name, serviceConfig); err != nil {
			log.Infof("Failed loading service %s", name)
			continue
		}
		enabled[name] = true
	}

	p.ReloadCallback = func() error {
		// Only the boot-time project reacts to config reloads.
		if p.Name != "system-init" {
			return nil
		}

		if err := cfg.Reload(); err != nil {
			return err
		}

		// Pull in newly enabled include services, skipping ones already loaded.
		for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
			if !serviceEnabled {
				continue
			}
			if en, ok := enabled[service]; ok && en {
				continue
			}

			bytes, err := LoadServiceResource(service, network, cfg)
			if err != nil {
				if err == util.ErrNoNetwork {
					log.Debugf("Can not load %s, networking not enabled", service)
				} else {
					log.Errorf("Failed to load %s : %v", service, err)
				}
				continue
			}

			if err := p.Load(bytes); err != nil {
				log.Errorf("Failed to load %s : %v", service, err)
				continue
			}

			enabled[service] = true
		}

		// Add any ad-hoc services declared directly in rancher.services.
		for service, config := range cfg.Rancher.Services {
			if en, ok := enabled[service]; ok && en {
				continue
			}

			if err := p.AddConfig(service, config); err != nil {
				log.Errorf("Failed to load %s : %v", service, err)
				continue
			}
			enabled[service] = true
		}

		return nil
	}

	// Flip the network flag once the network service container has started.
	// NOTE(review): network is written by this goroutine and read in the
	// reload callback without synchronization — potential data race; confirm.
	go func() {
		for event := range projectEvents {
			if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" {
				network = true
			}
		}
	}()

	if err := p.ReloadCallback(); err != nil {
		log.Errorf("Failed to reload %s : %v", name, err)
		return err
	}
	return p.Up()
}
// LoadServiceResource fetches the service definition for name from the
// configured repositories; network indicates whether network-backed
// sources may be consulted.
func LoadServiceResource(name string, network bool, cfg *config.CloudConfig) ([]byte, error) {
	return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray())
}

11
docker/util.go Normal file
View File

@ -0,0 +1,11 @@
package docker
import (
"github.com/docker/libcompose/project"
"github.com/rancherio/os/config"
)
// IsSystemContainer reports whether the service carries the
// io.rancher.os.scope=system label and therefore runs in system Docker.
func IsSystemContainer(serviceConfig *project.ServiceConfig) bool {
	labels := serviceConfig.Labels.MapParts()
	return labels[config.SCOPE] == config.SYSTEM
}

View File

@ -8,11 +8,11 @@ import (
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project"
"github.com/rancher/docker-from-scratch"
"github.com/rancherio/os/compose"
"github.com/rancherio/os/config"
"github.com/rancherio/os/docker"
"github.com/rancherio/os/util"
"github.com/rancherio/rancher-compose/librcompose/project"
)
func autoformat(cfg *config.CloudConfig) error {
@ -23,16 +23,17 @@ func autoformat(cfg *config.CloudConfig) error {
FORMATZERO := "FORMATZERO=" + fmt.Sprint(cfg.Rancher.State.FormatZero)
cfg.Rancher.Autoformat["autoformat"].Environment = project.NewMaporEqualSlice([]string{AUTOFORMAT, FORMATZERO})
log.Info("Running Autoformat services")
err := docker.RunServices("autoformat", cfg, cfg.Rancher.Autoformat)
_, err := compose.RunServiceSet("autoformat", cfg, cfg.Rancher.Autoformat)
return err
}
func runBootstrapContainers(cfg *config.CloudConfig) error {
log.Info("Running Bootstrap services")
return docker.RunServices("bootstrap", cfg, cfg.Rancher.BootstrapContainers)
_, err := compose.RunServiceSet("bootstrap", cfg, cfg.Rancher.BootstrapContainers)
return err
}
func startDocker(cfg *config.Config) (chan interface{}, error) {
func startDocker(cfg *config.CloudConfig) (chan interface{}, error) {
launchConfig, args := getLaunchConfig(cfg, &cfg.Rancher.BootstrapDocker)
launchConfig.Fork = true

View File

@ -30,7 +30,7 @@ var (
}
)
func loadModules(cfg *config.Config) error {
func loadModules(cfg *config.CloudConfig) error {
mounted := map[string]bool{}
f, err := os.Open("/proc/modules")
@ -44,7 +44,7 @@ func loadModules(cfg *config.Config) error {
mounted[strings.SplitN(reader.Text(), " ", 2)[0]] = true
}
for _, module := range cfg.Modules {
for _, module := range cfg.Rancher.Modules {
if mounted[module] {
continue
}
@ -58,7 +58,7 @@ func loadModules(cfg *config.Config) error {
return nil
}
func sysInit(cfg *config.Config) error {
func sysInit(cfg *config.CloudConfig) error {
args := append([]string{config.SYSINIT_BIN}, os.Args[1:]...)
cmd := &exec.Cmd{
@ -83,18 +83,18 @@ func MainInit() {
}
}
func mountState(cfg *config.Config) error {
func mountState(cfg *config.CloudConfig) error {
var err error
if cfg.State.Dev == "" {
if cfg.Rancher.State.Dev == "" {
return nil
}
dev := util.ResolveDevice(cfg.State.Dev)
dev := util.ResolveDevice(cfg.Rancher.State.Dev)
if dev == "" {
return fmt.Errorf("Could not resolve device %q", cfg.State.Dev)
return fmt.Errorf("Could not resolve device %q", cfg.Rancher.State.Dev)
}
fsType := cfg.State.FsType
fsType := cfg.Rancher.State.FsType
if fsType == "auto" {
fsType, err = util.GetFsType(dev)
}
@ -108,7 +108,7 @@ func mountState(cfg *config.Config) error {
return util.Mount(dev, STATE, fsType, "")
}
func tryMountState(cfg *config.Config) error {
func tryMountState(cfg *config.CloudConfig) error {
if mountState(cfg) == nil {
return nil
}
@ -121,8 +121,8 @@ func tryMountState(cfg *config.Config) error {
return mountState(cfg)
}
func tryMountAndBootstrap(cfg *config.Config) error {
if err := tryMountState(cfg); !cfg.State.Required && err != nil {
func tryMountAndBootstrap(cfg *config.CloudConfig) error {
if err := tryMountState(cfg); !cfg.Rancher.State.Required && err != nil {
return nil
} else if err != nil {
return err
@ -132,15 +132,15 @@ func tryMountAndBootstrap(cfg *config.Config) error {
return switchRoot(STATE)
}
func getLaunchConfig(cfg *config.Config, dockerCfg *config.DockerConfig) (*dockerlaunch.Config, []string) {
func getLaunchConfig(cfg *config.CloudConfig, dockerCfg *config.DockerConfig) (*dockerlaunch.Config, []string) {
var launchConfig dockerlaunch.Config
args := dockerlaunch.ParseConfig(&launchConfig, append(dockerCfg.Args, dockerCfg.ExtraArgs...)...)
launchConfig.DnsConfig.Nameservers = cfg.Network.Dns.Nameservers
launchConfig.DnsConfig.Search = cfg.Network.Dns.Search
launchConfig.DnsConfig.Nameservers = cfg.Rancher.Network.Dns.Nameservers
launchConfig.DnsConfig.Search = cfg.Rancher.Network.Dns.Search
if !cfg.Debug {
if !cfg.Rancher.Debug {
launchConfig.LogFile = config.SYSTEM_DOCKER_LOG
}
@ -148,17 +148,17 @@ func getLaunchConfig(cfg *config.Config, dockerCfg *config.DockerConfig) (*docke
}
func RunInit() error {
var cfg config.Config
var cfg config.CloudConfig
os.Setenv("PATH", "/sbin:/usr/sbin:/usr/bin")
// Magic setting to tell Docker to do switch_root and not pivot_root
os.Setenv("DOCKER_RAMDISK", "true")
initFuncs := []config.InitFunc{
func(cfg *config.Config) error {
func(cfg *config.CloudConfig) error {
return dockerlaunch.PrepareFs(&mountConfig)
},
func(cfg *config.Config) error {
func(cfg *config.CloudConfig) error {
newCfg, err := config.LoadConfig()
if err == nil {
newCfg, err = config.LoadConfig()
@ -167,7 +167,7 @@ func RunInit() error {
*cfg = *newCfg
}
if cfg.Debug {
if cfg.Rancher.Debug {
cfgString, _ := config.Dump(false, true)
if cfgString != "" {
log.Debugf("Config: %s", cfgString)
@ -178,7 +178,7 @@ func RunInit() error {
},
loadModules,
tryMountAndBootstrap,
func(cfg *config.Config) error {
func(cfg *config.CloudConfig) error {
return cfg.Reload()
},
loadModules,
@ -189,7 +189,7 @@ func RunInit() error {
return err
}
launchConfig, args := getLaunchConfig(&cfg, &cfg.SystemDocker)
launchConfig, args := getLaunchConfig(&cfg, &cfg.Rancher.SystemDocker)
log.Info("Launching System Docker")
_, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...)

View File

@ -44,15 +44,14 @@ func copyMoveRoot(rootfs string) error {
filename := path.Join("/", file.Name())
if filename == rootfs {
log.Debugf("Skipping Deleting %s", filename)
continue
}
log.Debugf("Deleting %s", filename)
//if err := os.Remove(filename); err != nil {
if err := os.RemoveAll(filename); err != nil {
return err
}
//}
}
return nil
@ -90,7 +89,7 @@ func switchRoot(rootfs string) error {
}
log.Debugf("Successfully moved to new root at %s", rootfs)
os.Setenv("DOCKER_RAMDISK", "false")
os.Unsetenv("DOCKER_RAMDISK")
return nil
}

View File

@ -7,6 +7,7 @@ import (
log "github.com/Sirupsen/logrus"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancherio/os/compose"
"github.com/rancherio/os/config"
"github.com/rancherio/os/docker"
)
@ -88,42 +89,6 @@ func loadImages(cfg *config.CloudConfig) error {
return nil
}
func runContainers(cfg *config.CloudConfig) error {
return docker.RunServices("system-init", cfg, cfg.Rancher.Services)
}
func tailConsole(cfg *config.CloudConfig) error {
if !cfg.Rancher.Console.Tail {
return nil
}
client, err := docker.NewSystemClient()
if err != nil {
return err
}
console, ok := cfg.Rancher.Services[config.CONSOLE_CONTAINER]
if !ok {
log.Error("Console not found")
return nil
}
c := docker.NewContainerFromService(config.DOCKER_SYSTEM_HOST, config.CONSOLE_CONTAINER, console)
if c.Err != nil {
return c.Err
}
log.Infof("Tailing console : %s", c.Name)
return client.Logs(dockerClient.LogsOptions{
Container: c.Name,
Stdout: true,
Stderr: true,
Follow: true,
OutputStream: os.Stdout,
ErrorStream: os.Stderr,
})
}
func SysInit() error {
cfg, err := config.LoadConfig()
if err != nil {
@ -132,7 +97,9 @@ func SysInit() error {
initFuncs := []config.InitFunc{
loadImages,
runContainers,
func(cfg *config.CloudConfig) error {
return compose.RunServices(cfg)
},
func(cfg *config.CloudConfig) error {
syscall.Sync()
return nil
@ -141,7 +108,6 @@ func SysInit() error {
log.Infof("RancherOS %s started", config.VERSION)
return nil
},
tailConsole,
}
return config.RunInitFuncs(cfg, initFuncs)

View File

@ -6,6 +6,7 @@ import (
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/reexec"
dockerlaunchMain "github.com/rancher/docker-from-scratch/main"
"github.com/rancherio/os/cmd/cloudinit"
"github.com/rancherio/os/cmd/control"
"github.com/rancherio/os/cmd/network"
@ -39,6 +40,7 @@ func registerCmd(cmd string, mainFunc func()) {
func main() {
registerCmd("/init", osInit.MainInit)
registerCmd(config.SYSINIT_BIN, sysinit.Main)
registerCmd("/usr/bin/dockerlaunch", dockerlaunchMain.Main)
registerCmd("/usr/bin/system-docker", systemdocker.Main)
registerCmd("/sbin/poweroff", power.PowerOff)
registerCmd("/sbin/reboot", power.Reboot)

View File

@ -27,8 +27,7 @@ rancher:
labels:
io.rancher.os.detach: false
io.rancher.os.scope: system
links:
- autoformat
io.rancher.os.after: autoformat
log_driver: json-file
net: host
uts: host
@ -89,10 +88,7 @@ rancher:
io.rancher.os.detach: false
io.rancher.os.reloadconfig: true
io.rancher.os.scope: system
links:
- preload-user-images
- cloud-init-pre
- network
io.rancher.os.after: cloud-init-pre,network
net: host
uts: host
privileged: true
@ -107,8 +103,7 @@ rancher:
io.rancher.os.detach: false
io.rancher.os.reloadconfig: true
io.rancher.os.scope: system
links:
- preload-system-images
io.rancher.os.after: preload-system-images
net: host
uts: host
privileged: true
@ -136,15 +131,13 @@ rancher:
- /usr/bin/ros:/usr/bin/respawn:ro
- /usr/bin/ros:/usr/bin/system-docker:ro
- /usr/bin/ros:/usr/sbin/wait-for-docker:ro
- /lib/modules:/lib/modules
- /usr/bin/ros:/usr/sbin/dockerlaunch:ro
- /usr/bin/docker:/usr/bin/docker:ro
console:
image: rancher/os-console:v0.4.0-dev
labels:
io.rancher.os.remove: true
io.rancher.os.scope: system
links:
- cloud-init
io.rancher.os.after: cloud-init
net: host
uts: host
pid: host
@ -157,8 +150,7 @@ rancher:
image: rancher/os-docker:v0.4.0-dev
labels:
io.rancher.os.scope: system
links:
- network
io.rancher.os.after: network
net: host
uts: host
pid: host
@ -177,27 +169,14 @@ rancher:
privileged: true
read_only: true
volumes:
- /var/lib/rancher/conf:/var/lib/rancher/conf
- /var/lib/docker:/var/lib/docker
- /var/lib/system-docker:/var/lib/system-docker
dockerwait:
image: rancher/os-dockerwait:v0.4.0-dev
labels:
io.rancher.os.detach: false
io.rancher.os.scope: system
links:
- docker
net: host
uts: host
volumes_from:
- all-volumes
network:
image: rancher/os-network:v0.4.0-dev
labels:
io.rancher.os.detach: false
io.rancher.os.scope: system
links:
- cloud-init-pre
io.rancher.os.after: cloud-init-pre
net: host
uts: host
privileged: true
@ -208,9 +187,7 @@ rancher:
image: rancher/os-ntp:v0.4.0-dev
labels:
io.rancher.os.scope: system
links:
- cloud-init
- network
io.rancher.os.after: cloud-init, network
net: host
uts: host
privileged: true
@ -231,9 +208,6 @@ rancher:
image: rancher/os-preload:v0.4.0-dev
labels:
io.rancher.os.detach: false
io.rancher.os.scope: system
links:
- dockerwait
privileged: true
volumes:
- /var/run/docker.sock:/var/run/docker.sock
@ -263,7 +237,7 @@ rancher:
read_only: true
volumes:
- /dev:/host/dev
- /os-config.yml:/os-config.yml
- /usr/share/ros/os-config.yml:/usr/share/ros/os-config.yml
- /var/lib/rancher:/var/lib/rancher
- /var/lib/rancher/conf:/var/lib/rancher/conf
- /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt.rancher

View File

@ -99,7 +99,7 @@ else
done
fi
KERNEL_ARGS="rancher.password=rancher console=ttyS0 ${QEMU_APPEND}"
KERNEL_ARGS="rancher.password=rancher rancher.modules=[9p,9pnet_virtio] console=ttyS0 ${QEMU_APPEND}"
if [ "$UNAME" == "Darwin" ] && [ -x $(which xhyve) ]; then

54
util/backoff.go Normal file
View File

@ -0,0 +1,54 @@
package util
import "time"
// Backoff delivers a stream of retry "ticks" over a channel with
// exponentially growing spacing. Each value received from Start's channel
// is true while retrying is still allowed, and false once MaxMillis has
// elapsed since Start.
//
// StartMillis is the initial delay between ticks, doubling each tick up to
// MaxIntervalMillis; MaxMillis caps the total retry window. If both
// StartMillis and MaxIntervalMillis are zero, defaults of 100ms / 2s / 5min
// are applied.
type Backoff struct {
	StartMillis, MaxIntervalMillis, MaxMillis int
	c                                         chan bool
	done                                      chan bool
}

// Start launches the backoff loop and returns the channel of ticks. The
// caller must eventually call Close to stop the loop and release the
// channels.
func (b *Backoff) Start() <-chan bool {
	b.c = make(chan bool)
	b.done = make(chan bool)
	go b.backoff()
	return b.c
}

// Close stops the backoff goroutine; it blocks until the goroutine has
// acknowledged the stop request.
func (b *Backoff) Close() error {
	b.done <- true
	return nil
}

// backoff is the goroutine body: it repeatedly publishes whether the retry
// window is still open, sleeping an exponentially growing interval between
// ticks, until Close is called.
func (b *Backoff) backoff() {
	if b.StartMillis == 0 && b.MaxIntervalMillis == 0 {
		b.StartMillis = 100
		b.MaxIntervalMillis = 2000
		b.MaxMillis = 300000
	}

	start := time.Now()
	currentMillis := b.StartMillis
	for {
		// Fix: derive the tick value from the elapsed time instead of
		// pushing a one-off false and then resuming true ticks; once the
		// window has expired every subsequent tick is false.
		writeVal := time.Since(start) <= time.Duration(b.MaxMillis)*time.Millisecond
		select {
		case <-b.done:
			close(b.done)
			close(b.c)
			return
		case b.c <- writeVal:
		}
		time.Sleep(time.Duration(currentMillis) * time.Millisecond)
		currentMillis *= 2
		if currentMillis > b.MaxIntervalMillis {
			currentMillis = b.MaxIntervalMillis
		}
	}
}

View File

@ -345,3 +345,16 @@ func KVPairs2Map(kvs []string) map[string]string {
}
return r
}
// TrimSplitN trims surrounding whitespace from str, splits it on sep at
// most count times (count < 0 means unlimited), and trims whitespace from
// each resulting piece.
func TrimSplitN(str, sep string, count int) []string {
	pieces := strings.SplitN(strings.TrimSpace(str), sep, count)
	trimmed := make([]string, 0, len(pieces))
	for _, piece := range pieces {
		trimmed = append(trimmed, strings.TrimSpace(piece))
	}
	return trimmed
}

// TrimSplit splits str on every occurrence of sep, trimming whitespace from
// the input and from each piece.
func TrimSplit(str, sep string) []string {
	return TrimSplitN(str, sep, -1)
}