Bump cAdvisor (and dependencies) godeps version

Tim St. Clair
2016-05-20 11:43:32 -07:00
parent 4215fe57a5
commit 237f90d6ee
388 changed files with 3788 additions and 121879 deletions

View File

@@ -20,18 +20,18 @@ package docker
import (
"sync"
dclient "github.com/fsouza/go-dockerclient"
dclient "github.com/docker/engine-api/client"
)
var (
dockerClient *dclient.Client
dockerClientErr error
once sync.Once
dockerClient *dclient.Client
dockerClientErr error
dockerClientOnce sync.Once
)
func Client() (*dclient.Client, error) {
once.Do(func() {
dockerClient, dockerClientErr = dclient.NewClient(*ArgDockerEndpoint)
dockerClientOnce.Do(func() {
dockerClient, dockerClientErr = dclient.NewClient(*ArgDockerEndpoint, "", nil, nil)
})
return dockerClient, dockerClientErr
}
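The new engine-api constructor takes the endpoint plus what appear to be an API version, an *http.Client, and default headers, hence the extra "", nil, nil arguments; the once-guarded singleton itself only gains a less collision-prone sync.Once name. A minimal, self-contained sketch of that singleton pattern, using a hypothetical fakeClient type instead of the vendored engine-api client:

package main

import (
	"fmt"
	"sync"
)

// fakeClient is a hypothetical stand-in for *dclient.Client.
type fakeClient struct{ endpoint string }

var (
	client     *fakeClient
	clientErr  error
	clientOnce sync.Once
)

// Client constructs the client exactly once; later callers get the cached value (or error).
func Client(endpoint string) (*fakeClient, error) {
	clientOnce.Do(func() {
		client, clientErr = &fakeClient{endpoint: endpoint}, nil
	})
	return client, clientErr
}

func main() {
	c, err := Client("unix:///var/run/docker.sock")
	fmt.Printf("%+v %v\n", c, err)
}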

View File

@@ -0,0 +1,163 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provides global docker information.
package docker
import (
"fmt"
"strconv"
"strings"
dockertypes "github.com/docker/engine-api/types"
"golang.org/x/net/context"
"github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/machine"
)
func Status() (v1.DockerStatus, error) {
client, err := Client()
if err != nil {
return v1.DockerStatus{}, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
dockerInfo, err := client.Info(context.Background())
if err != nil {
return v1.DockerStatus{}, err
}
out := v1.DockerStatus{}
out.Version = VersionString()
out.KernelVersion = machine.KernelVersion()
out.OS = dockerInfo.OperatingSystem
out.Hostname = dockerInfo.Name
out.RootDir = dockerInfo.DockerRootDir
out.Driver = dockerInfo.Driver
out.ExecDriver = dockerInfo.ExecutionDriver
out.NumImages = dockerInfo.Images
out.NumContainers = dockerInfo.Containers
out.DriverStatus = make(map[string]string, len(dockerInfo.DriverStatus))
for _, v := range dockerInfo.DriverStatus {
out.DriverStatus[v[0]] = v[1]
}
return out, nil
}
func Images() ([]v1.DockerImage, error) {
client, err := Client()
if err != nil {
return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
images, err := client.ImageList(context.Background(), dockertypes.ImageListOptions{All: false})
if err != nil {
return nil, err
}
out := []v1.DockerImage{}
const unknownTag = "<none>:<none>"
for _, image := range images {
if len(image.RepoTags) == 1 && image.RepoTags[0] == unknownTag {
// images without repo or tags are uninteresting.
continue
}
di := v1.DockerImage{
ID: image.ID,
RepoTags: image.RepoTags,
Created: image.Created,
VirtualSize: image.VirtualSize,
Size: image.Size,
}
out = append(out, di)
}
return out, nil
}
// Checks whether the dockerInfo reflects a valid docker setup, and returns it if it does, or an
// error otherwise.
func ValidateInfo() (*dockertypes.Info, error) {
client, err := Client()
if err != nil {
return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
dockerInfo, err := client.Info(context.Background())
if err != nil {
return nil, fmt.Errorf("failed to detect Docker info: %v", err)
}
// Fall back to version API if ServerVersion is not set in info.
if dockerInfo.ServerVersion == "" {
version, err := client.ServerVersion(context.Background())
if err != nil {
return nil, fmt.Errorf("unable to get docker version: %v", err)
}
dockerInfo.ServerVersion = version.Version
}
version, err := parseDockerVersion(dockerInfo.ServerVersion)
if err != nil {
return nil, err
}
if version[0] < 1 {
return nil, fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as %q", []int{1, 0, 0}, version, dockerInfo.ServerVersion)
}
// Check that the libcontainer execdriver is used if the version is < 1.11
// (execution drivers are no longer supported as of 1.11).
if version[0] <= 1 && version[1] <= 10 &&
!strings.HasPrefix(dockerInfo.ExecutionDriver, "native") {
return nil, fmt.Errorf("docker found, but not using native exec driver")
}
if dockerInfo.Driver == "" {
return nil, fmt.Errorf("failed to find docker storage driver")
}
return &dockerInfo, nil
}
func Version() ([]int, error) {
return parseDockerVersion(VersionString())
}
func VersionString() string {
docker_version := "Unknown"
client, err := Client()
if err == nil {
version, err := client.ServerVersion(context.Background())
if err == nil {
docker_version = version.Version
}
}
return docker_version
}
// TODO: switch to a semantic versioning library.
func parseDockerVersion(full_version_string string) ([]int, error) {
matches := version_re.FindAllStringSubmatch(full_version_string, -1)
if len(matches) != 1 {
return nil, fmt.Errorf("version string \"%v\" doesn't match expected regular expression: \"%v\"", full_version_string, version_regexp_string)
}
version_string_array := matches[0][1:]
version_array := make([]int, 3)
for index, version_string := range version_string_array {
version, err := strconv.Atoi(version_string)
if err != nil {
return nil, fmt.Errorf("error while parsing \"%v\" in \"%v\"", version_string, full_version_string)
}
version_array[index] = version
}
return version_array, nil
}
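ValidateInfo above rejects daemons older than 1.0 and, below 1.11, any exec driver other than "native"; both checks ride on parseDockerVersion. A self-contained sketch of that parsing — the real version_regexp_string lives in factory.go and is not shown in this diff, so the pattern here is only an assumption for illustration:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// versionRe is an assumed stand-in for version_re / version_regexp_string.
var versionRe = regexp.MustCompile(`(\d+)\.(\d+)\.(\d+)`)

// parseVersion turns "1.11.1" into [1 11 1], mirroring parseDockerVersion above.
func parseVersion(s string) ([]int, error) {
	m := versionRe.FindStringSubmatch(s)
	if m == nil {
		return nil, fmt.Errorf("version string %q does not match %v", s, versionRe)
	}
	out := make([]int, 3)
	for i, part := range m[1:] {
		n, err := strconv.Atoi(part)
		if err != nil {
			return nil, fmt.Errorf("error parsing %q in %q: %v", part, s, err)
		}
		out[i] = n
	}
	return out, nil
}

func main() {
	fmt.Println(parseVersion("1.11.1")) // [1 11 1] <nil>
	fmt.Println(parseVersion("bogus"))  // nil plus an error
}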

View File

@@ -19,27 +19,26 @@ import (
"fmt"
"path"
"regexp"
"strconv"
"strings"
"sync"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/devicemapper"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
dockerutil "github.com/google/cadvisor/utils/docker"
docker "github.com/fsouza/go-dockerclient"
docker "github.com/docker/engine-api/client"
"github.com/golang/glog"
"golang.org/x/net/context"
)
var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint")
// The namespace under which Docker aliases are unique.
var DockerNamespace = "docker"
// Basepath to all container specific information that libcontainer stores.
// TODO: Deprecate this flag
var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path to the Docker state root directory (default: /var/lib/docker)")
var dockerRunDir = flag.String("docker_run", "/var/run/docker", "Absolute path to the Docker run directory (default: /var/run/docker)")
const DockerNamespace = "docker"
// Regexp that identifies docker cgroups; containers started with
// --cgroup-parent have a prefix other than 'docker'
@@ -47,24 +46,30 @@ var dockerCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
var dockerEnvWhitelist = flag.String("docker_env_metadata_whitelist", "", "a comma-separated list of environment variable keys that needs to be collected for docker containers")
// TODO(vmarmol): Export run dir too for newer Dockers.
// Directory holding Docker container state information.
func DockerStateDir() string {
return libcontainer.DockerStateDir(*dockerRootDir)
}
var (
// Basepath to all container specific information that libcontainer stores.
dockerRootDir string
const (
dockerRootDirKey = "Root Dir"
dockerRootDirFlag = flag.String("docker_root", "/var/lib/docker", "DEPRECATED: docker root is read from docker info (this is a fallback, default: /var/lib/docker)")
dockerRootDirOnce sync.Once
)
func RootDir() string {
return *dockerRootDir
dockerRootDirOnce.Do(func() {
status, err := Status()
if err == nil && status.RootDir != "" {
dockerRootDir = status.RootDir
} else {
dockerRootDir = *dockerRootDirFlag
}
})
return dockerRootDir
}
type storageDriver string
const (
// TODO: Add support for devicemapper storage usage.
devicemapperStorageDriver storageDriver = "devicemapper"
aufsStorageDriver storageDriver = "aufs"
overlayStorageDriver storageDriver = "overlay"
@@ -88,6 +93,8 @@ type dockerFactory struct {
dockerVersion []int
ignoreMetrics container.MetricSet
thinPoolWatcher *devicemapper.ThinPoolWatcher
}
func (self *dockerFactory) String() string {
@@ -114,6 +121,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
metadataEnvs,
self.dockerVersion,
self.ignoreMetrics,
self.thinPoolWatcher,
)
return
}
@@ -146,7 +154,7 @@ func (self *dockerFactory) CanHandleAndAccept(name string) (bool, bool, error) {
id := ContainerNameToDockerId(name)
// We assume that if Inspect fails then the container is not known to docker.
ctnr, err := self.client.InspectContainer(id)
ctnr, err := self.client.ContainerInspect(context.Background(), id)
if err != nil || !ctnr.State.Running {
return false, canAccept, fmt.Errorf("error inspecting container: %v", err)
}
@@ -163,24 +171,6 @@ var (
version_re = regexp.MustCompile(version_regexp_string)
)
// TODO: switch to a semantic versioning library.
func parseDockerVersion(full_version_string string) ([]int, error) {
matches := version_re.FindAllStringSubmatch(full_version_string, -1)
if len(matches) != 1 {
return nil, fmt.Errorf("version string \"%v\" doesn't match expected regular expression: \"%v\"", full_version_string, version_regexp_string)
}
version_string_array := matches[0][1:]
version_array := make([]int, 3)
for index, version_string := range version_string_array {
version, err := strconv.Atoi(version_string)
if err != nil {
return nil, fmt.Errorf("error while parsing \"%v\" in \"%v\"", version_string, full_version_string)
}
version_array[index] = version
}
return version_array, nil
}
// Register root container before running this function!
func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
client, err := Client()
@@ -196,16 +186,35 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
// Version already validated above, assume no error here.
dockerVersion, _ := parseDockerVersion(dockerInfo.ServerVersion)
storageDir := dockerInfo.DockerRootDir
if storageDir == "" {
storageDir = *dockerRootDir
}
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
if err != nil {
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
}
glog.Infof("Registering Docker factory")
var (
dockerStorageDriver = storageDriver(dockerInfo.Driver)
thinPoolWatcher *devicemapper.ThinPoolWatcher = nil
)
if dockerStorageDriver == devicemapperStorageDriver {
// If the storage driver is devicemapper, create and start a
// ThinPoolWatcher to monitor the size of container CoW layers with
// thin_ls.
dockerThinPoolName, err := dockerutil.DockerThinPoolName(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't find device mapper thin pool name: %v", err)
}
dockerMetadataDevice, err := dockerutil.DockerMetadataDevice(*dockerInfo)
if err != nil {
return fmt.Errorf("couldn't determine devicemapper metadata device")
}
thinPoolWatcher = devicemapper.NewThinPoolWatcher(dockerThinPoolName, dockerMetadataDevice)
go thinPoolWatcher.Start()
}
glog.Infof("registering Docker factory")
f := &dockerFactory{
cgroupSubsystems: cgroupSubsystems,
client: client,
@@ -213,10 +222,11 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
fsInfo: fsInfo,
machineInfoFactory: factory,
storageDriver: storageDriver(dockerInfo.Driver),
storageDir: storageDir,
storageDir: RootDir(),
ignoreMetrics: ignoreMetrics,
thinPoolWatcher: thinPoolWatcher,
}
container.RegisterContainerHandlerFactory(f)
container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
return nil
}
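Register now spins up a ThinPoolWatcher only when the reported storage driver is devicemapper, using the pool name and metadata device pulled from docker info. A hedged sketch of how such values can be read from the DriverStatus key/value pairs that Status() exposes above; the helpers actually used are dockerutil.DockerThinPoolName and dockerutil.DockerMetadataDevice, and the key names below are assumptions:

package main

import "fmt"

// driverStatusValue scans docker-info style [key, value] pairs for a key.
func driverStatusValue(status [][2]string, key string) (string, bool) {
	for _, kv := range status {
		if kv[0] == key {
			return kv[1], true
		}
	}
	return "", false
}

func main() {
	// Example pairs as a devicemapper daemon might report them (values made up).
	status := [][2]string{
		{"Pool Name", "docker-thinpool"},
		{"Metadata file", "/var/lib/docker/devicemapper/metadata"},
	}
	if pool, ok := driverStatusValue(status, "Pool Name"); ok {
		fmt.Println("thin pool:", pool)
	}
}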

View File

@@ -25,13 +25,18 @@ import (
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/common"
containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/devicemapper"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
dockerutil "github.com/google/cadvisor/utils/docker"
docker "github.com/fsouza/go-dockerclient"
docker "github.com/docker/engine-api/client"
dockercontainer "github.com/docker/engine-api/types/container"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/net/context"
)
const (
@@ -55,10 +60,18 @@ type dockerContainerHandler struct {
// Manager of this container's cgroups.
cgroupManager cgroups.Manager
// the docker storage driver
storageDriver storageDriver
fsInfo fs.FsInfo
rootfsStorageDir string
// devicemapper state
// the devicemapper poolname
poolName string
// the devicemapper device id for the container
deviceID string
// Time at which this container was created.
creationTime time.Time
@@ -76,14 +89,19 @@ type dockerContainerHandler struct {
rootFs string
// The network mode of the container
networkMode string
networkMode dockercontainer.NetworkMode
// Filesystem handler.
fsHandler common.FsHandler
ignoreMetrics container.MetricSet
// thin pool watcher
thinPoolWatcher *devicemapper.ThinPoolWatcher
}
var _ container.ContainerHandler = &dockerContainerHandler{}
func getRwLayerID(containerID, storageDir string, sd storageDriver, dockerVersion []int) (string, error) {
const (
// Docker version >=1.10.0 have a randomized ID for the root fs of a container.
@@ -101,6 +119,7 @@ func getRwLayerID(containerID, storageDir string, sd storageDriver, dockerVersio
return string(bytes), err
}
// newDockerContainerHandler returns a new container.ContainerHandler
func newDockerContainerHandler(
client *docker.Client,
name string,
@@ -113,6 +132,7 @@ func newDockerContainerHandler(
metadataEnvs []string,
dockerVersion []int,
ignoreMetrics container.MetricSet,
thinPoolWatcher *devicemapper.ThinPoolWatcher,
) (container.ContainerHandler, error) {
// Create the cgroup paths.
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -144,14 +164,27 @@ func newDockerContainerHandler(
if err != nil {
return nil, err
}
var rootfsStorageDir string
// Determine the rootfs storage dir OR the pool name used to identify the device
var (
rootfsStorageDir string
poolName string
)
switch storageDriver {
case aufsStorageDriver:
rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
case overlayStorageDriver:
rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
case devicemapperStorageDriver:
status, err := Status()
if err != nil {
return nil, fmt.Errorf("unable to determine docker status: %v", err)
}
poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
}
// TODO: extract object mother method
handler := &dockerContainerHandler{
id: id,
client: client,
@@ -162,21 +195,24 @@ func newDockerContainerHandler(
storageDriver: storageDriver,
fsInfo: fsInfo,
rootFs: rootFs,
poolName: poolName,
rootfsStorageDir: rootfsStorageDir,
envs: make(map[string]string),
ignoreMetrics: ignoreMetrics,
}
if !ignoreMetrics.Has(container.DiskUsageMetrics) {
handler.fsHandler = common.NewFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo)
thinPoolWatcher: thinPoolWatcher,
}
// We assume that if Inspect fails then the container is not known to docker.
ctnr, err := client.InspectContainer(id)
ctnr, err := client.ContainerInspect(context.Background(), id)
if err != nil {
return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
}
handler.creationTime = ctnr.Created
// Timestamp returned by Docker is in time.RFC3339Nano format.
handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
if err != nil {
// This should not happen, report the error just in case
return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
}
handler.pid = ctnr.State.Pid
// Add the name and bare ID as aliases of the container.
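Where go-dockerclient exposed Created as a time.Time, engine-api's inspect result carries it as a string, hence the time.Parse call above. A tiny sketch of that parse with a made-up timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	created := "2016-05-20T11:43:32.123456789Z" // example value only
	t, err := time.Parse(time.RFC3339Nano, created)
	if err != nil {
		fmt.Println("failed to parse create timestamp:", err)
		return
	}
	fmt.Println(t.UTC())
}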
@@ -184,6 +220,15 @@ func newDockerContainerHandler(
handler.labels = ctnr.Config.Labels
handler.image = ctnr.Config.Image
handler.networkMode = ctnr.HostConfig.NetworkMode
handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
if !ignoreMetrics.Has(container.DiskUsageMetrics) {
handler.fsHandler = &dockerFsHandler{
fsHandler: common.NewFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo),
thinPoolWatcher: thinPoolWatcher,
deviceID: handler.deviceID,
}
}
// split env vars to get metadata map.
for _, exposedEnv := range metadataEnvs {
@@ -198,6 +243,48 @@ func newDockerContainerHandler(
return handler, nil
}
// dockerFsHandler is a composite FsHandler implementation that incorporates
// the common fs handler and a devicemapper ThinPoolWatcher.
type dockerFsHandler struct {
fsHandler common.FsHandler
// thinPoolWatcher is the devicemapper thin pool watcher
thinPoolWatcher *devicemapper.ThinPoolWatcher
// deviceID is the id of the container's fs device
deviceID string
}
var _ common.FsHandler = &dockerFsHandler{}
func (h *dockerFsHandler) Start() {
h.fsHandler.Start()
}
func (h *dockerFsHandler) Stop() {
h.fsHandler.Stop()
}
func (h *dockerFsHandler) Usage() (uint64, uint64) {
baseUsage, usage := h.fsHandler.Usage()
// When devicemapper is the storage driver, the base usage of the container comes from the thin pool.
// We still need the result of the fsHandler for any extra storage associated with the container.
// To correctly factor in the thin pool usage, we should:
// * Use the thin pool usage as the base usage
// * Calculate the overall usage by adding the overall usage from the fs handler to the thin pool usage
if h.thinPoolWatcher != nil {
thinPoolUsage, err := h.thinPoolWatcher.GetUsage(h.deviceID)
if err != nil {
glog.Errorf("unable to get fs usage from thin pool for device %v: %v", h.deviceID, err)
} else {
baseUsage = thinPoolUsage
usage += thinPoolUsage
}
}
return baseUsage, usage
}
func (self *dockerContainerHandler) Start() {
if self.fsHandler != nil {
self.fsHandler.Start()
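The new dockerFsHandler is a small decorator: it delegates to the ordinary fs handler and, when a thin pool watcher is present, substitutes the thin-pool reading as the base usage while adding it to the total. A simplified sketch of that shape, using stand-in types rather than the cAdvisor interfaces:

package main

import "fmt"

type usager interface {
	Usage() (base, total uint64)
}

type diskUsage struct{ base, total uint64 }

func (d diskUsage) Usage() (uint64, uint64) { return d.base, d.total }

// thinPoolAware wraps another usager and folds in a thin-pool reading.
type thinPoolAware struct {
	inner        usager
	thinPoolUsed uint64 // would come from a ThinPoolWatcher in the real code
}

func (t thinPoolAware) Usage() (uint64, uint64) {
	base, total := t.inner.Usage()
	// Base usage comes from the thin pool; extra storage reported by the
	// inner handler is added on top.
	base = t.thinPoolUsed
	total += t.thinPoolUsed
	return base, total
}

func main() {
	h := thinPoolAware{inner: diskUsage{base: 0, total: 512}, thinPoolUsed: 4096}
	fmt.Println(h.Usage()) // 4096 4608
}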
@@ -222,7 +309,7 @@ func (self *dockerContainerHandler) ContainerReference() (info.ContainerReferenc
func (self *dockerContainerHandler) needNet() bool {
if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) {
return !strings.HasPrefix(self.networkMode, "container:")
return !self.networkMode.IsContainer()
}
return false
}
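needNet now asks the typed NetworkMode whether the container joins another container's network namespace instead of doing a raw prefix check. The sketch below keeps the old string-based test, which is roughly what engine-api's IsContainer() expresses on the typed value:

package main

import (
	"fmt"
	"strings"
)

// isContainerNetworkMode reproduces the old check removed in this hunk.
func isContainerNetworkMode(mode string) bool {
	return strings.HasPrefix(mode, "container:")
}

func main() {
	fmt.Println(isContainerNetworkMode("container:abc123")) // true
	fmt.Println(isContainerNetworkMode("bridge"))           // false
}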
@@ -242,17 +329,22 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
return nil
}
var device string
switch self.storageDriver {
case devicemapperStorageDriver:
// Device has to be the pool name to correlate with the device name as
// set in the machine info filesystems.
device = self.poolName
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
if err != nil {
return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
}
device = deviceInfo.Device
default:
return nil
}
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
if err != nil {
return err
}
mi, err := self.machineInfoFactory.GetMachineInfo()
if err != nil {
return err
@@ -265,16 +357,16 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
// Docker does not impose any filesystem limits for containers. So use capacity as limit.
for _, fs := range mi.Filesystems {
if fs.Device == deviceInfo.Device {
if fs.Device == device {
limit = fs.Capacity
fsType = fs.Type
break
}
}
fsStat := info.FsStats{Device: deviceInfo.Device, Type: fsType, Limit: limit}
fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}
fsStat.BaseUsage, fsStat.Usage = self.fsHandler.Usage()
stats.Filesystem = append(stats.Filesystem, fsStat)
return nil
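getFsStats now picks the "device" per storage driver (the thin pool name for devicemapper, the rootfs directory's device otherwise) and then looks that device up in the machine's filesystems, borrowing that filesystem's capacity as the container's limit. A simplified sketch of the lookup, with stand-in types and made-up values:

package main

import "fmt"

type filesystem struct {
	Device   string
	Type     string
	Capacity uint64
}

// limitFor returns the capacity and type of the filesystem backing device.
func limitFor(device string, filesystems []filesystem) (limit uint64, fsType string) {
	for _, fs := range filesystems {
		if fs.Device == device {
			return fs.Capacity, fs.Type
		}
	}
	return 0, ""
}

func main() {
	fss := []filesystem{{Device: "docker-thinpool", Type: "devicemapper", Capacity: 107374182400}}
	limit, t := limitFor("docker-thinpool", fss)
	fmt.Println(limit, t) // 107374182400 devicemapper
}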
@@ -316,11 +408,6 @@ func (self *dockerContainerHandler) GetCgroupPath(resource string) (string, erro
return path, nil
}
func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]int, error) {
// TODO(vmarmol): Implement.
return nil, nil
}
func (self *dockerContainerHandler) GetContainerLabels() map[string]string {
return self.labels
}
@@ -329,83 +416,10 @@ func (self *dockerContainerHandler) ListProcesses(listType container.ListType) (
return containerlibcontainer.GetProcesses(self.cgroupManager)
}
func (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error {
return fmt.Errorf("watch is unimplemented in the Docker container driver")
}
func (self *dockerContainerHandler) StopWatchingSubcontainers() error {
// No-op for Docker driver.
return nil
}
func (self *dockerContainerHandler) Exists() bool {
return common.CgroupExists(self.cgroupPaths)
}
func DockerInfo() (docker.DockerInfo, error) {
client, err := Client()
if err != nil {
return docker.DockerInfo{}, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
info, err := client.Info()
if err != nil {
return docker.DockerInfo{}, err
}
return *info, nil
}
func DockerImages() ([]docker.APIImages, error) {
client, err := Client()
if err != nil {
return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
images, err := client.ListImages(docker.ListImagesOptions{All: false})
if err != nil {
return nil, err
}
return images, nil
}
// Checks whether the dockerInfo reflects a valid docker setup, and returns it if it does, or an
// error otherwise.
func ValidateInfo() (*docker.DockerInfo, error) {
client, err := Client()
if err != nil {
return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err)
}
dockerInfo, err := client.Info()
if err != nil {
return nil, fmt.Errorf("failed to detect Docker info: %v", err)
}
// Fall back to version API if ServerVersion is not set in info.
if dockerInfo.ServerVersion == "" {
version, err := client.Version()
if err != nil {
return nil, fmt.Errorf("unable to get docker version: %v", err)
}
dockerInfo.ServerVersion = version.Get("Version")
}
version, err := parseDockerVersion(dockerInfo.ServerVersion)
if err != nil {
return nil, err
}
if version[0] < 1 {
return nil, fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as %q", []int{1, 0, 0}, version, dockerInfo.ServerVersion)
}
// Check that the libcontainer execdriver is used if the version is < 1.11
// (execution drivers are no longer supported as of 1.11).
if version[0] <= 1 && version[1] <= 10 &&
!strings.HasPrefix(dockerInfo.ExecutionDriver, "native") {
return nil, fmt.Errorf("docker found, but not using native exec driver")
}
if dockerInfo.Driver == "" {
return nil, fmt.Errorf("failed to find docker storage driver")
}
return dockerInfo, nil
func (self *dockerContainerHandler) Type() container.ContainerType {
return container.ContainerTypeDocker
}