infrakit: Move the hyperkit instance plugin into the source directory

- The tools directory ideally should not contain source code
- Removes double vendoring of packages
- Makes it easier to hook the build into the top-level Makefile

Eventually, the plugin should be moved to the infrakit repo.

Signed-off-by: Rolf Neugebauer <rolf.neugebauer@docker.com>
Rolf Neugebauer 2017-03-25 12:41:22 +01:00
parent 30c39863c1
commit 48845bcfd9
418 changed files with 23393 additions and 2389 deletions

View File

@ -10,11 +10,19 @@ GOARCH=amd64
ifneq ($(GOOS),linux)
CROSS=-e GOOS=$(GOOS) -e GOARCH=$(GOARCH)
endif
ifeq ($(GOOS),darwin)
default: bin/infrakit-instance-hyperkit
endif
bin/moby: $(MOBY_DEPS) | bin
tar cf - vendor src/initrd src/pad4 -C src/cmd/moby . | docker run --rm --net=none --log-driver=none -i $(CROSS) $(GO_COMPILE) --package github.com/docker/moby -o $@ | tar xf -
touch $@
INFRAKIT_DEPS=$(wildcard src/cmd/infrakit-instance-hyperkit/*.go)
bin/infrakit-instance-hyperkit: $(INFRAKIT_DEPS) | bin
tar cf - vendor -C src/cmd/infrakit-instance-hyperkit . | docker run --rm --net=none --log-driver=none -i $(CROSS) $(GO_COMPILE) --package github.com/docker/moby -o $@ | tar xf -
touch $@
moby-initrd.img: bin/moby moby.yaml
bin/moby build moby.yaml

View File

@ -1,8 +1,9 @@
## Hyperkit/Moby Infrakit plugin
This is a HyperKit/Moby instance plugin for infrakit. The instance
plugin is capable of starting/managing several hyperkit instances with
different configurations and Moby configurations.
There is a HyperKit Moby instance plugin for infrakit in
`src/cmd/infrakit-instance-hyperkit`. The instance plugin is capable
of starting/managing several hyperkit instances with different
configurations and Moby configurations.
The plugin keeps state in a local directory (default
`.infrakit/hyperkit-vms`) where each instance keeps some state in a
@ -12,9 +13,9 @@ command line using the `--vm-dir` option.
## Building
```sh
make
make bin/infrakit-instance-hyperkit
```
(you need a working docker installation, such as Docker for Mac)
## Quickstart
@ -31,7 +32,7 @@ infrakit-flavor-vanilla
Then start the hyperkit plugin:
```shell
./build/infrakit-instance-hyperkit
./bin/infrakit-instance-hyperkit
```
Next, you can commit a new configuration. There is a sample infrakit config file in `hyperkit.json`. It assumes that you have a default moby build in the top-level directory. The `Moby` property needs to be of the form `<dir>/<prefix>` and assumes that the kernel and initrd images are called `<prefix>-bzImage` and `<prefix>-initrd.img` respectively (this is the convention used by the `moby` tool).
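As an illustration of that convention (a sketch, not code from this repository), a `Moby` value such as `./moby` resolves to `./moby-bzImage` and `./moby-initrd.img`:

```go
// Sketch of the <dir>/<prefix> convention described above; the prefix
// "./moby" is only an example value.
func mobyImages(moby string) (kernel, initrd string) {
	return moby + "-bzImage", moby + "-initrd.img"
}
```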

View File

@ -1,3 +0,0 @@
build/
vmlib/
vms/

View File

@ -1,10 +0,0 @@
FROM mobylinux/go-compile:3afebc59c5cde31024493c3f91e6102d584a30b9@sha256:e0786141ea7df8ba5735b63f2a24b4ade9eae5a02b0e04c4fca33b425ec69b0a
ENV GOPATH=/go
ENV PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# The project sources
VOLUME ["/go/src/github.com/docker/infrakit.hyperkit"]
WORKDIR /go/src/github.com/docker/infrakit.hyperkit
ENTRYPOINT ["make"]

View File

@ -1,49 +0,0 @@
.PHONY: build-in-container build-local
DEPS:=$(wildcard cmd/*.go) Dockerfile.build Makefile
# Version/Revision information
VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always || echo "Unknown")
REVISION ?= $(shell git rev-list -1 HEAD || echo "Unknown")
build-in-container: $(DEPS) clean
@echo "+ $@"
@docker build -t infrakit-hyperkit-build -f ./Dockerfile.build .
@docker run --rm \
-v ${CURDIR}:/go/src/github.com/docker/infrakit.hyperkit \
infrakit-hyperkit-build build-local VERSION=${VERSION} REVISION=${REVISION}
build-local: build/infrakit-instance-hyperkit
build/infrakit-instance-hyperkit: $(DEPS)
@echo "+ $@"
GOOS=darwin GOARCH=amd64 \
go build -o $@ \
--ldflags '-extldflags "-fno-PIC" -X github.com/docker/diagkit/pkg/gather.Version=$(VERSION) -X github.com/docker/diagkit/pkg/gather.Revision=$(REVISION)' \
cmd/main.go cmd/instance.go
clean:
rm -rf build
fmt:
@echo "+ $@"
@gofmt -s -l . 2>&1 | grep -v ^vendor/ | xargs gofmt -s -l -w
lint:
@echo "+ $@"
$(if $(shell which golint || echo ''), , \
$(error Please install golint))
@test -z "$$(golint ./... 2>&1 | grep -v ^vendor/ | grep -v mock/ | tee /dev/stderr)"
# this will blow away the vendored code and update it to latest
.PHONY: revendor revendor-local
revendor:
@echo "+ $@"
@rm -rf vendor.conf vendor
@docker build -t infrakit-hyperkit-build -f ./Dockerfile.build .
@docker run --rm \
-v ${CURDIR}:/go/src/github.com/docker/infrakit.hyperkit \
infrakit-hyperkit-build revendor-local VERSION=${VERSION} REVISION=${REVISION}
revendor-local:
@echo "+ $@"
@go get github.com/LK4D4/vndr
@vndr init

View File

@ -1,20 +0,0 @@
github.com/Masterminds/semver 312afcd0e81e5cf81fdc3cfd0e8504ae031521c8
github.com/Masterminds/sprig 01a849f546a584d7b29bfee253e7db0aed44f7ba
github.com/Sirupsen/logrus 10f801ebc38b33738c9d17d50860f484a0988ff5
github.com/aokoli/goutils 9c37978a95bd5c709a15883b6242714ea6709e64
github.com/armon/go-radix 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
github.com/docker/hyperkit 874e68dbb7a2a7a2794dbd8648c2f4be1e7a8bb3
github.com/docker/infrakit 208d114478ed94ee9015083e13946ca1caaad790
github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/gorilla/rpc 22c016f3df3febe0c1f6727598b6389507e03a18
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/mitchellh/go-ps 4fdf99ab29366514c69ccccddab5dc58b8d84062
github.com/rneugeba/iso9660wrap 9c7eaf5ac74b2416be8b7b8d1f35b9af44a6e4fa
github.com/satori/go.uuid b061729afc07e77a8aa4fad0a2fd840958f1942a
github.com/spf13/cobra 7be4beda01ec05d0b93d80b3facd2b6f44080d94
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
golang.org/x/crypto 459e26527287adbc2adcc5d0d49abff9a5f315a7
golang.org/x/sys 99f16d856c9836c42d24e7ab64ea72916925fa97
gopkg.in/tylerb/graceful.v1 4654dfbb6ad53cb5e27f37d99b02e16c1872fbbb

@ -1 +0,0 @@
Subproject commit 2d2df7bd8bda53b5a55ed04422173cedd50500ea

@ -1 +0,0 @@
Subproject commit 7f4b1adc791766938c29457bed0703fb9134421a

@ -1 +0,0 @@
Subproject commit 9c37978a95bd5c709a15883b6242714ea6709e64

View File

@ -1,115 +0,0 @@
## [HyperKit](http://github.com/docker/hyperkit)
![Build Status OSX](https://circleci.com/gh/docker/hyperkit.svg?style=shield&circle-token=cf8379b302eab2bbf33821cafe164dbefb71982d)
*HyperKit* is a toolkit for embedding hypervisor capabilities in your application. It includes a complete hypervisor, based on [xhyve](https://github.com/mist64/xhyve)/[bhyve](http://bhyve.org), which is optimized for lightweight virtual machines and container deployment. It is designed to be interfaced with higher-level components such as the [VPNKit](https://github.com/docker/vpnkit) and [DataKit](https://github.com/docker/datakit).
HyperKit currently only supports Mac OS X using the [Hypervisor.framework](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/index.html). It is a core component of Docker For Mac.
## Requirements
* OS X 10.10.3 Yosemite or later
* a 2010 or later Mac (i.e. a CPU that supports EPT)
## Reporting Bugs
If you are using a version of Hyperkit which is embedded into a higher level application (e.g. [Docker for Mac](https://github.com/docker/for-mac)) then please report any issues against that higher level application in the first instance. That way the relevant team can triage and determine if the issue lies in Hyperkit and assign as necessary.
If you are using Hyperkit directly then please report issues against this repository.
## Usage
$ hyperkit -h
## Building
$ git clone https://github.com/docker/hyperkit
$ cd hyperkit
$ make
The resulting binary will be in `build/hyperkit`
To enable qcow support in the block backend an OCaml [OPAM](https://opam.ocaml.org) development
environment is required with the qcow module available. A
suitable environment can be setup by installing `opam` and `libev`
via `brew` and using `opam` to install the appropriate libraries:
$ brew install opam libev
$ opam init
$ eval `opam config env`
$ opam install uri qcow.0.8.1 mirage-block-unix.2.6.0 conf-libev logs fmt mirage-unix
Notes:
- `opam config env` must be evaluated each time prior to building
hyperkit so the build will find the ocaml environment.
- Any previous pin of `mirage-block-unix` or `qcow`
should be removed with the commands:
```sh
$ opam update
$ opam pin remove mirage-block-unix
$ opam pin remove qcow
```
## Tracing
HyperKit defines a number of static DTrace probes to simplify investigation of
performance problems. To list the probes supported by your version of HyperKit,
type the following command while HyperKit VM is running:
$ sudo dtrace -l -P 'hyperkit$target' -p $(pgrep hyperkit)
Refer to scripts in dtrace/ directory for examples of possible usage and
available probes.
### Relationship to xhyve and bhyve
HyperKit includes a hypervisor derived from [xhyve](http://www.xhyve.org), which in turn
was derived from [bhyve](http://www.bhyve.org). See the [original xhyve
README](README.xhyve.md) which incorporates the bhyve README.
We try to avoid deviating from these upstreams unnecessarily in order
to more easily share code, for example the various device
models/emulations should be easily shareable.
### Reporting security issues
The maintainers take security seriously. If you discover a security issue,
please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts&mdash;if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
## Copyright and license
Copyright the authors and contributors. See individual source files
for details.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.

View File

@ -1,224 +0,0 @@
# [xhyve.org](http://www.xhyve.org)
![](./xhyve_logo.png)
<!-- https://thenounproject.com/term/squirrel/57718/ -->
About
-----
The *xhyve hypervisor* is a port of [bhyve](http://www.bhyve.org) to OS X. It is built on top of [Hypervisor.framework](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/index.html) in OS X 10.10 Yosemite and higher, runs entirely in userspace, and has no other dependencies. It can run FreeBSD and vanilla Linux distributions and may gain support for other guest operating systems in the future.
License: BSD
Introduction: [http://www.pagetable.com/?p=831](http://www.pagetable.com/?p=831)
Requirements
------------
* OS X 10.10.3 Yosemite or later
* a 2010 or later Mac (i.e. a CPU that supports EPT)
Installation
------------
If you have homebrew, then simply:
$ brew update
$ brew install --HEAD xhyve
The `--HEAD` in the brew command ensures that you always get the latest changes, even if the homebrew database is not yet updated. If for any reason you don't want that simply do `brew install xhyve` .
if not then:
Building
--------
$ git clone https://github.com/mist64/xhyve
$ cd xhyve
$ make
The resulting binary will be in build/xhyve
Usage
-----
$ xhyve -h
What is bhyve?
--------------
bhyve is the FreeBSD hypervisor, roughly analogous to KVM + QEMU on Linux. It has a focus on simplicity and being legacy free.
It exposes the following peripherals to virtual machines:
- Local x(2)APIC
- IO-APIC
- 8259A PIC
- 8253/8254 PIT
- HPET
- PM Timer
- RTC
- PCI
- host bridge
- passthrough
- UART
- AHCI (i.e. HDD and CD)
- VirtIO block device
- VirtIO networking
- VirtIO RNG
Notably absent are sound, USB, HID and any kind of graphics support. With a focus on server virtualization this is not strictly a requirement. bhyve may gain desktop virtualization capabilities in the future but this doesn't seem to be a priority.
Unlike QEMU, bhyve also currently lacks any kind of guest-side firmware (QEMU uses the GPL3 [SeaBIOS](http://www.seabios.org)), but aims to provide a compatible [OVMF EFI](http://www.linux-kvm.org/page/OVMF) in the near future. It does however provide ACPI, SMBIOS and MP Tables.
bhyve architecture
------------------
(Diagram: the userspace tools bhyve, bhyvectl, bhyveload, and grub2-bhyve, which handle guest I/O, VM control, and the FreeBSD/NetBSD/OpenBSD/Linux boot paths, all sit on libvmmapi; libvmmapi talks via ioctl across the user/kernel boundary to vmm.ko in the FreeBSD kernel, which provides VMX/SVM host and guest support, nested paging, timers, and interrupts.)
**vmm.ko**
The bhyve FreeBSD kernel module. Manages VM and vCPU objects, the guest physical address space and handles guest interaction with PIC, PIT, HPET, PM Timer, x(2)APIC and I/O-APIC. Contains a minimal x86 emulator to decode guest MMIO. Executes the two innermost vCPU runloops (VMX/SVM and interrupts/timers/paging). Has backends for Intel VMX and AMD SVM. Provides an ioctl and mmap API to userspace.
**libvmmapi**
Thin abstraction layer between the vmm.ko ioctl interface and the userspace C API.
**bhyve**
The userspace bhyve component (kind of a very light-weight QEMU) that executes virtual machines. Runs the guest I/O vCPU runloops. Manages ACPI, PCI and all non in-kernel devices. Interacts with vmm.ko through libvmmapi.
**bhyvectl**
Somewhat superfluous utility to introspect and manage the life cycle of virtual machines. Virtual machines and vCPUs can exist as kernel objects independently of a bhyve host process. Typically used to delete VM objects after use. Odd architectural choice.
**bhyveload**
Userspace port of the FreeBSD bootloader. Since bhyve still lacks a firmware this is a cumbersome workaround to bootstrap a guest operating system. It creates a VM object, loads the FreeBSD kernel into guest memory, sets up the initial vCPU state and then exits. Only then a VM can be executed by bhyve.
**grub2-bhyve**
Performs the same function as bhyveload but is a userspace port of [GRUB2](http://github.com/grehan-freebsd/grub2-bhyve). It is used to bootstrap guest operating systems other than FreeBSD, i.e. Linux, OpenBSD and NetBSD.
Support for Windows guests is work in progress and dependent on the EFI port.
xhyve architecture
------------------
(Diagram: a single xhyve process handles guest I/O and embeds the vmm component, which provides the VMX guest, timers, and interrupts; it runs on top of Hypervisor.framework and crosses the user/kernel boundary via syscalls into the xnu kernel, which handles the VMX host state and nested paging.)
xhyve shares most of the code with bhyve but is architecturally very different. Hypervisor.framework provides an interface to the VMX VMCS guest state and a safe subset of the VMCS control fields, thus making userspace hypervisors without any additional kernel extensions possible. The VMX host state and all aspects of nested paging are handled by the OS X kernel, you can manage the guest physical address space simply through mapping of regions of your own address space.
*xhyve* is equivalent to the *bhyve* process but gains a subset of a userspace port of the vmm kernel module. SVM, PCI passthrough and the VMX host and EPT aspects are dropped. The vmm component provides a libvmmapi compatible interface to xhyve. Hypervisor.framework seems to enforce a strict 1:1 relationship between a host process/VM and host thread/vCPU, that means VMs and vCPUs can only be interacted with by the processes and threads that created them. Therefore, unlike bhyve, xhyve needs to adhere to a single process model. Multiple virtual machines can be created by launching multiple instances of xhyve. xhyve retains most of the bhyve command line interface.
*bhyvectl*, *bhyveload* and *grub2-bhyve* are incompatible with a single process model and are dropped. As a stop-gap solution until we have a proper firmware xhyve supports the Linux [kexec protocol](http://www.kernel.org/doc/Documentation/x86/boot.txt), a very simple and straightforward way to bootstrap a Linux kernel. It takes a bzImage and optionally initrd image and kernel parameter string as input.
Networking
------
If you want the same IP address across VM reboots, assign a UUID to a particular VM:
$ xhyve [-U uuid]
**Optional:**
If you need more advanced networking and already have a configured [TAP](http://tuntaposx.sourceforge.net) device you can use it with:
virtio-tap,tapX
instead of:
virtio-net
Where *X* is your tap device, i.e. */dev/tapX*.
Issues
------
If you are, or were, running any version of VirtualBox, prior to 4.3.30 or 5.0,
and attempt to run xhyve your system will immediately crash as a kernel panic is
triggered. This is due to a VirtualBox bug (that got fixed in newest VirtualBox
versions) as VirtualBox wasn't playing nice with OSX's Hypervisor.framework used
by xhyve.
To get around this you either have to update to newest VirtualBox 4.3 or 5.0 or,
if you for some reason are unable to update, to reboot
your Mac after using VirtualBox and before attempting to use xhyve.
(see issues [#5](https://github.com/mist64/xhyve/issues/5) and
[#9](https://github.com/mist64/xhyve/issues/9) for the full context)
TODO
----
- vmm:
- enable APIC access page to speed up APIC emulation (**performance**)
- enable x2APIC MSRs (even faster) (**performance**)
- vmm_callout:
- is a quick'n'dirty implementation of the FreeBSD kernel callout mechanism
- seems to be racy
- fix races or perhaps replace with something better
- use per vCPU timer event thread (**performance**)?
- use hardware VMX preemption timer instead of `pthread_cond_wait` (**performance**)
- some 32-bit guests are broken (support PAE paging in VMCS)
- PCID guest support (**performance**)
- block_if:
- OS X does not support `preadv`/`pwritev`, we need to serialize reads and writes for the time being until we find a better solution. (**performance**)
- support block devices other than plain files
- virtio_net:
- unify TAP and vmnet backends
- vmnet: make it not require root
- vmnet: send/receive more than a single packet at a time (**performance**)
- virtio_rnd:
- is untested
- remove explicit state transitions:
- since only the owning task/thread can modify the VM/vCPUs a lot of the synchronization might be unnecessary (**performance**)
- performance, performance and performance
- remove vestigial code, cleanup

View File

@ -1,534 +0,0 @@
// +build darwin
// Package hyperkit provides a Go wrapper around the hyperkit
// command. It currently shells out to start hyperkit with the
// provided configuration.
//
// Most of the arguments should be self explanatory, but console
// handling deserves a mention. If the Console is configured with
// ConsoleStdio, the hyperkit is started with stdin, stdout, and
// stderr plumbed through to the VM console. If Console is set to
// ConsoleFile, the console output is redirected to a file and
// console input is disabled. For this mode StateDir has to be set and
// the interactive console is accessible via a 'tty' file created
// there.
//
// Currently this module has some limitations:
// - Only supports zero or one disk image
// - Only supports zero or one network interface connected to VPNKit
// - Only kexec boot
//
// This package is currently implemented by shelling out to a hyperkit
// process. In the future we may change this to become a wrapper
// around the hyperkit library.
package hyperkit
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"github.com/mitchellh/go-ps"
"github.com/rneugeba/iso9660wrap"
)
const (
// ConsoleStdio configures console to use Stdio
ConsoleStdio = iota
// ConsoleFile configures console to a tty and output to a file
ConsoleFile
defaultVPNKitSock = "Library/Containers/com.docker.docker/Data/s50"
defaultDiskImage = "disk.img"
defaultCPUs = 1
defaultMemory = 1024 // 1G
jsonFile = "hyperkit.json"
pidFile = "hyperkit.pid"
)
var defaultHyperKits = []string{"hyperkit",
"com.docker.hyperkit",
"/usr/local/bin/hyperkit",
"/Applications/Docker.app/Contents/Resources/bin/hyperkit",
"/Applications/Docker.app/Contents/MacOS/com.docker.hyperkit"}
// HyperKit contains the configuration of the hyperkit VM
type HyperKit struct {
// HyperKit is the path to the hyperkit binary
HyperKit string `json:"hyperkit"`
// StateDir is the directory where runtime state is kept. If left empty, no state will be kept.
StateDir string `json:"state_dir"`
// VPNKitSock is the location of the VPNKit socket used for networking.
VPNKitSock string `json:"vpnkit_sock"`
// DiskImage is the path to the disk image to use
DiskImage string `json:"disk"`
// ISOImage is the (optional) path to an ISO image to attach
ISOImage string `json:"iso"`
// Kernel is the path to the kernel image to boot
Kernel string `json:"kernel"`
// Initrd is the path to the initial ramdisk to boot off
Initrd string `json:"initrd"`
// CPUs is the number of CPUs to configure
CPUs int `json:"cpus"`
// Memory is the amount of megabytes of memory for the VM
Memory int `json:"memory"`
// DiskSize is the size of the disk image in megabytes. If zero and DiskImage does not exist, no disk will be attached.
DiskSize int `json:"disk_size"`
// Console defines where the console of the VM should be
// connected to. ConsoleStdio and ConsoleFile are supported.
Console int `json:"console"`
// UserData, if non-empty, will be added to an ISO under the
// filename `config` and passed to the VM.
UserData string `json:"user_data"`
// Below here are internal members, but they are exported so
// that they are written to the state json file, if configured.
// Pid of the hyperkit process
Pid int `json:"pid"`
// Arguments used to execute the hyperkit process
Arguments []string `json:"arguments"`
// CmdLine is a single string of the command line
CmdLine string `json:"cmdline"`
process *os.Process
background bool
log *log.Logger
}
// New creates a template config structure.
// - If hyperkit can't be found an error is returned.
// - If vpnkitsock is empty no networking is configured. If it is set
// to "auto" it tries to re-use the Docker for Mac VPNKit connection.
// - If statedir is "" no state is written to disk.
func New(hyperkit, statedir, vpnkitsock, diskimage string) (*HyperKit, error) {
h := HyperKit{}
var err error
h.HyperKit, err = checkHyperKit(hyperkit)
if err != nil {
return nil, err
}
h.StateDir = statedir
h.VPNKitSock, err = checkVPNKitSock(vpnkitsock)
if err != nil {
return nil, err
}
h.DiskImage = diskimage
h.CPUs = defaultCPUs
h.Memory = defaultMemory
h.Console = ConsoleStdio
h.UserData = ""
return &h, nil
}
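// Illustrative sketch, not part of the original file: a minimal use of the
// package as documented above. The kernel/initrd paths and the kernel
// command line are placeholders; "auto" re-uses the Docker for Mac VPNKit
// socket, and an empty state dir means no state is written to disk.
func runExample() error {
h, err := New("", "", "auto", "")
if err != nil {
return err
}
h.Kernel = "moby-bzImage"
h.Initrd = "moby-initrd.img"
h.Console = ConsoleStdio
// Run blocks until the VM exits; Start would launch it in the background.
return h.Run("console=ttyS0")
}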
// FromState reads a json file from statedir and populates a HyperKit structure.
func FromState(statedir string) (*HyperKit, error) {
b, err := ioutil.ReadFile(filepath.Join(statedir, jsonFile))
if err != nil {
return nil, fmt.Errorf("Can't read json file: ", err)
}
h := &HyperKit{}
err = json.Unmarshal(b, h)
if err != nil {
return nil, fmt.Errorf("Can't parse json file: ", err)
}
// Make sure the pid written by hyperkit is the same as in the json
d, err := ioutil.ReadFile(filepath.Join(statedir, pidFile))
if err != nil {
return nil, err
}
pid, err := strconv.Atoi(string(d[:]))
if err != nil {
return nil, err
}
if h.Pid != pid {
return nil, fmt.Errorf("pids do not match %d != %d", h.Pid, pid)
}
h.process, err = os.FindProcess(h.Pid)
if err != nil {
return nil, err
}
return h, nil
}
// SetLogger sets the log instance to use for the output of the hyperkit process itself (not the console of the VM).
// This is only relevant when Console is set to ConsoleFile
func (h *HyperKit) SetLogger(logger *log.Logger) {
h.log = logger
}
// Run the VM with a given command line until it exits
func (h *HyperKit) Run(cmdline string) error {
h.background = false
return h.execute(cmdline)
}
// Start the VM with a given command line in the background
func (h *HyperKit) Start(cmdline string) error {
h.background = true
return h.execute(cmdline)
}
func (h *HyperKit) execute(cmdline string) error {
var err error
// Sanity checks on configuration
if h.Console == ConsoleFile && h.StateDir == "" {
return fmt.Errorf("If ConsoleFile is set, StateDir was be specified")
}
if h.UserData != "" && h.ISOImage != "" {
return fmt.Errorf("If UserData is supplied, ISOImage must not be set")
}
if h.ISOImage != "" {
if _, err = os.Stat(h.ISOImage); os.IsNotExist(err) {
return fmt.Errorf("ISO %s does not exist", h.ISOImage)
}
}
if h.UserData != "" && h.StateDir == "" {
return fmt.Errorf("If UserData is supplied, StateDir was be specified")
}
if _, err = os.Stat(h.Kernel); os.IsNotExist(err) {
return fmt.Errorf("Kernel %s does not exist", h.Kernel)
}
if _, err = os.Stat(h.Initrd); os.IsNotExist(err) {
return fmt.Errorf("initrd %s does not exist", h.Initrd)
}
if h.DiskImage == "" && h.StateDir == "" && h.DiskSize != 0 {
return fmt.Errorf("Can't create disk, because neither DiskImage nor StateDir is set")
}
// Create files
if h.StateDir != "" {
err = os.MkdirAll(h.StateDir, 0755)
if err != nil {
return err
}
}
if h.DiskImage == "" && h.DiskSize != 0 {
h.DiskImage = filepath.Join(h.StateDir, "disk.img")
}
if _, err = os.Stat(h.DiskImage); os.IsNotExist(err) {
if h.DiskSize != 0 {
err = CreateDiskImage(h.DiskImage, h.DiskSize)
if err != nil {
return err
}
}
}
if h.UserData != "" {
h.ISOImage, err = createUserDataISO(h.StateDir, h.UserData)
if err != nil {
return err
}
}
// Run
h.buildArgs(cmdline)
err = h.execHyperKit()
if err != nil {
return err
}
return nil
}
// Stop the running VM
func (h *HyperKit) Stop() error {
if h.process == nil {
return fmt.Errorf("hyperkit process not known")
}
if !h.IsRunning() {
return nil
}
err := h.process.Kill()
if err != nil {
return err
}
return nil
}
// IsRunning returns true if the hyperkit process is running.
func (h *HyperKit) IsRunning() bool {
// os.FindProcess on Unix always returns a process object even
// if the process does not exist. There does not seem to be
// a call to find out if the process is running either, so we
// use another package to find out.
proc, err := ps.FindProcess(h.Pid)
if err != nil {
return false
}
if proc == nil {
return false
}
return true
}
// Remove deletes all statefiles if present.
// This also removes the StateDir if empty.
// If keepDisk is set, the diskimage will not get removed.
func (h *HyperKit) Remove(keepDisk bool) error {
if h.IsRunning() {
return fmt.Errorf("Can't remove state as process is running")
}
if h.StateDir == "" {
// If there is no state directory we don't mess with the disk
return nil
}
if !keepDisk {
return os.RemoveAll(h.StateDir)
}
files, _ := ioutil.ReadDir(h.StateDir)
for _, f := range files {
fn := filepath.Clean(filepath.Join(h.StateDir, f.Name()))
if fn == h.DiskImage {
continue
}
err := os.Remove(fn)
if err != nil {
return err
}
}
return nil
}
// Convert to json string
func (h *HyperKit) String() string {
s, err := json.Marshal(h)
if err != nil {
return err.Error()
}
return string(s)
}
// CreateDiskImage creates an empty file suitable for use as a disk image for a hyperkit VM.
func CreateDiskImage(location string, sizeMB int) error {
f, err := os.Create(location)
if err != nil {
return err
}
defer f.Close()
buf := make([]byte, 1048576) // 1MB buffer
for i := 0; i < sizeMB; i++ {
f.Write(buf)
}
return nil
}
func (h *HyperKit) buildArgs(cmdline string) {
a := []string{"-A", "-u"}
if h.StateDir != "" {
a = append(a, "-F", filepath.Join(h.StateDir, pidFile))
}
a = append(a, "-c", fmt.Sprintf("%d", h.CPUs))
a = append(a, "-m", fmt.Sprintf("%dM", h.Memory))
a = append(a, "-s", "0:0,hostbridge")
if h.VPNKitSock != "" {
a = append(a, "-s", fmt.Sprintf("1:0,virtio-vpnkit,path=%s", h.VPNKitSock))
}
if h.DiskImage != "" {
a = append(a, "-s", fmt.Sprintf("2:0,virtio-blk,%s", h.DiskImage))
}
if h.ISOImage != "" {
a = append(a, "-s", fmt.Sprintf("4,ahci-cd,%s", h.ISOImage))
}
a = append(a, "-s", "10,virtio-rnd")
a = append(a, "-s", "31,lpc")
if h.Console == ConsoleFile {
a = append(a, "-l", fmt.Sprintf("com1,autopty=%s/tty,log=%s/console-ring", h.StateDir, h.StateDir))
} else {
a = append(a, "-l", "com1,stdio")
}
kernArgs := fmt.Sprintf("kexec,%s,%s,earlyprintk=serial %s", h.Kernel, h.Initrd, cmdline)
a = append(a, "-f", kernArgs)
h.Arguments = a
h.CmdLine = h.HyperKit + " " + strings.Join(a, " ")
}
// Execute hyperkit and plumb stdin/stdout/stderr.
func (h *HyperKit) execHyperKit() error {
cmd := exec.Command(h.HyperKit, h.Arguments...)
cmd.Env = os.Environ()
// Plumb in stdin/stdout/stderr. If ConsoleStdio is configured
// plumb them to the system streams. If a logger is specified,
// use it for stdout/stderr logging. Otherwise use the default
// /dev/null.
if h.Console == ConsoleStdio {
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
} else if h.log != nil {
stdoutChan := make(chan string)
stderrChan := make(chan string)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
stream(stdout, stdoutChan)
stream(stderr, stderrChan)
done := make(chan struct{})
go func() {
for {
select {
case stderrl := <-stderrChan:
log.Printf("%s", stderrl)
case stdoutl := <-stdoutChan:
log.Printf("%s", stdoutl)
case <-done:
return
}
}
}()
}
err := cmd.Start()
if err != nil {
return err
}
h.Pid = cmd.Process.Pid
h.process = cmd.Process
err = h.writeState()
if err != nil {
h.process.Kill()
return err
}
if !h.background {
err = cmd.Wait()
if err != nil {
return err
}
}
return nil
}
// writeState writes the state to a JSON file
func (h *HyperKit) writeState() error {
if h.StateDir == "" {
// This is not an error
return nil
}
s, err := json.Marshal(h)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(h.StateDir, jsonFile), []byte(s), 0644)
}
func stream(r io.ReadCloser, dest chan<- string) {
go func() {
defer r.Close()
reader := bufio.NewReader(r)
for {
line, err := reader.ReadString('\n')
if err != nil {
return
}
dest <- line
}
}()
}
// Create an ISO with UserData in the specified directory
func createUserDataISO(dir string, init string) (string, error) {
cfgName := filepath.Join(dir, "config")
isoName := cfgName + ".iso"
if err := ioutil.WriteFile(cfgName, []byte(init), 0644); err != nil {
return "", err
}
outfh, err := os.OpenFile(isoName, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
if err != nil {
return "", err
}
infh, err := os.Open(cfgName)
if err != nil {
return "", err
}
err = iso9660wrap.WriteFile(outfh, infh)
if err != nil {
return "", err
}
return isoName, nil
}
// checkHyperKit tries to find and/or validate the path of hyperkit
func checkHyperKit(hyperkit string) (string, error) {
if hyperkit != "" {
p, err := exec.LookPath(hyperkit)
if err != nil {
return "", fmt.Errorf("Could not find hyperkit executable: ", hyperkit)
}
return p, nil
}
// Look in a number of default locations
for _, hyperkit := range defaultHyperKits {
p, err := exec.LookPath(hyperkit)
if err == nil {
return p, nil
}
}
return "", fmt.Errorf("Could not find hyperkit executable")
}
// checkVPNKitSock tries to find and/or validate the path of the VPNKit socket
func checkVPNKitSock(vpnkitsock string) (string, error) {
if vpnkitsock == "auto" {
vpnkitsock = filepath.Join(getHome(), defaultVPNKitSock)
}
vpnkitsock = filepath.Clean(vpnkitsock)
_, err := os.Stat(vpnkitsock)
if err != nil {
return "", err
}
return vpnkitsock, nil
}
func getHome() string {
if usr, err := user.Current(); err == nil {
return usr.HomeDir
}
return os.Getenv("HOME")
}

@ -1 +0,0 @@
Subproject commit 6516ace03d405955f738f8965abde5d9ab37fef6

@ -1 +0,0 @@
Subproject commit 08b5f424b9271eedf6f9f0ce86cb9396ed337a42

@ -1 +0,0 @@
Subproject commit 999ef73f5d50979cf6d12afed1726325b63f9570

@ -1 +0,0 @@
Subproject commit 22c016f3df3febe0c1f6727598b6389507e03a18

@ -1 +0,0 @@
Subproject commit 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75

@ -1 +0,0 @@
Subproject commit bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d

View File

@ -1,34 +0,0 @@
# Process List Library for Go
go-ps is a library for Go that implements OS-specific APIs to list and
manipulate processes in a platform-safe way. The library can find and
list processes on Linux, Mac OS X, Solaris, and Windows.
If you're new to Go, this library has a good amount of advanced Go educational
value as well. It uses some advanced features of Go: build tags, accessing
DLL methods for Windows, cgo for Darwin, etc.
How it works:
* **Darwin** uses the `sysctl` syscall to retrieve the process table.
* **Unix** uses the procfs at `/proc` to inspect the process tree.
* **Windows** uses the Windows API, and methods such as
`CreateToolhelp32Snapshot` to get a point-in-time snapshot of
the process table.
## Installation
Install using standard `go get`:
```
$ go get github.com/mitchellh/go-ps
...
```
## TODO
Want to contribute? Here is a short TODO list of things that aren't
implemented for this library that would be nice:
* FreeBSD support
* Plan9 support

View File

@ -1,40 +0,0 @@
// ps provides an API for finding and listing processes in a platform-agnostic
// way.
//
// NOTE: If you're reading these docs online via GoDocs or some other system,
// you might only see the Unix docs. This project makes heavy use of
// platform-specific implementations. We recommend reading the source if you
// are interested.
package ps
// Process is the generic interface that is implemented on every platform
// and provides common operations for processes.
type Process interface {
// Pid is the process ID for this process.
Pid() int
// PPid is the parent process ID for this process.
PPid() int
// Executable name running this process. This is not a path to the
// executable.
Executable() string
}
// Processes returns all processes.
//
// This of course will be a point-in-time snapshot of when this method was
// called. Some operating systems don't provide snapshot capability of the
// process table, in which case the process table returned might contain
// ephemeral entities that happened to be running when this was called.
func Processes() ([]Process, error) {
return processes()
}
// FindProcess looks up a single process by pid.
//
// Process will be nil and error will be nil if a matching process is
// not found.
func FindProcess(pid int) (Process, error) {
return findProcess(pid)
}
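// Illustrative sketch, not part of the original file: collect the executable
// names of all currently running processes using the interface above.
func executableNames() ([]string, error) {
procs, err := Processes()
if err != nil {
return nil, err
}
names := make([]string, 0, len(procs))
for _, p := range procs {
names = append(names, p.Executable())
}
return names, nil
}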

View File

@ -1,138 +0,0 @@
// +build darwin
package ps
import (
"bytes"
"encoding/binary"
"syscall"
"unsafe"
)
type DarwinProcess struct {
pid int
ppid int
binary string
}
func (p *DarwinProcess) Pid() int {
return p.pid
}
func (p *DarwinProcess) PPid() int {
return p.ppid
}
func (p *DarwinProcess) Executable() string {
return p.binary
}
func findProcess(pid int) (Process, error) {
ps, err := processes()
if err != nil {
return nil, err
}
for _, p := range ps {
if p.Pid() == pid {
return p, nil
}
}
return nil, nil
}
func processes() ([]Process, error) {
buf, err := darwinSyscall()
if err != nil {
return nil, err
}
procs := make([]*kinfoProc, 0, 50)
k := 0
for i := _KINFO_STRUCT_SIZE; i < buf.Len(); i += _KINFO_STRUCT_SIZE {
proc := &kinfoProc{}
err = binary.Read(bytes.NewBuffer(buf.Bytes()[k:i]), binary.LittleEndian, proc)
if err != nil {
return nil, err
}
k = i
procs = append(procs, proc)
}
darwinProcs := make([]Process, len(procs))
for i, p := range procs {
darwinProcs[i] = &DarwinProcess{
pid: int(p.Pid),
ppid: int(p.PPid),
binary: darwinCstring(p.Comm),
}
}
return darwinProcs, nil
}
func darwinCstring(s [16]byte) string {
i := 0
for _, b := range s {
if b != 0 {
i++
} else {
break
}
}
return string(s[:i])
}
func darwinSyscall() (*bytes.Buffer, error) {
mib := [4]int32{_CTRL_KERN, _KERN_PROC, _KERN_PROC_ALL, 0}
size := uintptr(0)
_, _, errno := syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
4,
0,
uintptr(unsafe.Pointer(&size)),
0,
0)
if errno != 0 {
return nil, errno
}
bs := make([]byte, size)
_, _, errno = syscall.Syscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
4,
uintptr(unsafe.Pointer(&bs[0])),
uintptr(unsafe.Pointer(&size)),
0,
0)
if errno != 0 {
return nil, errno
}
return bytes.NewBuffer(bs[0:size]), nil
}
const (
_CTRL_KERN = 1
_KERN_PROC = 14
_KERN_PROC_ALL = 0
_KINFO_STRUCT_SIZE = 648
)
type kinfoProc struct {
_ [40]byte
Pid int32
_ [199]byte
Comm [16]byte
_ [301]byte
PPid int32
_ [84]byte
}

View File

@ -1,260 +0,0 @@
// +build freebsd,amd64
package ps
import (
"bytes"
"encoding/binary"
"syscall"
"unsafe"
)
// copied from sys/sysctl.h
const (
CTL_KERN = 1 // "high kernel": proc, limits
KERN_PROC = 14 // struct: process entries
KERN_PROC_PID = 1 // by process id
KERN_PROC_PROC = 8 // only return procs
KERN_PROC_PATHNAME = 12 // path to executable
)
// copied from sys/user.h
type Kinfo_proc struct {
Ki_structsize int32
Ki_layout int32
Ki_args int64
Ki_paddr int64
Ki_addr int64
Ki_tracep int64
Ki_textvp int64
Ki_fd int64
Ki_vmspace int64
Ki_wchan int64
Ki_pid int32
Ki_ppid int32
Ki_pgid int32
Ki_tpgid int32
Ki_sid int32
Ki_tsid int32
Ki_jobc [2]byte
Ki_spare_short1 [2]byte
Ki_tdev int32
Ki_siglist [16]byte
Ki_sigmask [16]byte
Ki_sigignore [16]byte
Ki_sigcatch [16]byte
Ki_uid int32
Ki_ruid int32
Ki_svuid int32
Ki_rgid int32
Ki_svgid int32
Ki_ngroups [2]byte
Ki_spare_short2 [2]byte
Ki_groups [64]byte
Ki_size int64
Ki_rssize int64
Ki_swrss int64
Ki_tsize int64
Ki_dsize int64
Ki_ssize int64
Ki_xstat [2]byte
Ki_acflag [2]byte
Ki_pctcpu int32
Ki_estcpu int32
Ki_slptime int32
Ki_swtime int32
Ki_cow int32
Ki_runtime int64
Ki_start [16]byte
Ki_childtime [16]byte
Ki_flag int64
Ki_kiflag int64
Ki_traceflag int32
Ki_stat [1]byte
Ki_nice [1]byte
Ki_lock [1]byte
Ki_rqindex [1]byte
Ki_oncpu [1]byte
Ki_lastcpu [1]byte
Ki_ocomm [17]byte
Ki_wmesg [9]byte
Ki_login [18]byte
Ki_lockname [9]byte
Ki_comm [20]byte
Ki_emul [17]byte
Ki_sparestrings [68]byte
Ki_spareints [36]byte
Ki_cr_flags int32
Ki_jid int32
Ki_numthreads int32
Ki_tid int32
Ki_pri int32
Ki_rusage [144]byte
Ki_rusage_ch [144]byte
Ki_pcb int64
Ki_kstack int64
Ki_udata int64
Ki_tdaddr int64
Ki_spareptrs [48]byte
Ki_spareint64s [96]byte
Ki_sflag int64
Ki_tdflags int64
}
// UnixProcess is an implementation of Process that contains Unix-specific
// fields and information.
type UnixProcess struct {
pid int
ppid int
state rune
pgrp int
sid int
binary string
}
func (p *UnixProcess) Pid() int {
return p.pid
}
func (p *UnixProcess) PPid() int {
return p.ppid
}
func (p *UnixProcess) Executable() string {
return p.binary
}
// Refresh reloads all the data associated with this process.
func (p *UnixProcess) Refresh() error {
mib := []int32{CTL_KERN, KERN_PROC, KERN_PROC_PID, int32(p.pid)}
buf, length, err := call_syscall(mib)
if err != nil {
return err
}
proc_k := Kinfo_proc{}
if length != uint64(unsafe.Sizeof(proc_k)) {
return err
}
k, err := parse_kinfo_proc(buf)
if err != nil {
return err
}
p.ppid, p.pgrp, p.sid, p.binary = copy_params(&k)
return nil
}
func copy_params(k *Kinfo_proc) (int, int, int, string) {
n := -1
for i, b := range k.Ki_comm {
if b == 0 {
break
}
n = i + 1
}
comm := string(k.Ki_comm[:n])
return int(k.Ki_ppid), int(k.Ki_pgid), int(k.Ki_sid), comm
}
func findProcess(pid int) (Process, error) {
mib := []int32{CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, int32(pid)}
_, _, err := call_syscall(mib)
if err != nil {
return nil, err
}
return newUnixProcess(pid)
}
func processes() ([]Process, error) {
results := make([]Process, 0, 50)
mib := []int32{CTL_KERN, KERN_PROC, KERN_PROC_PROC, 0}
buf, length, err := call_syscall(mib)
if err != nil {
return results, err
}
// get kinfo_proc size
k := Kinfo_proc{}
procinfo_len := int(unsafe.Sizeof(k))
count := int(length / uint64(procinfo_len))
// parse buf to procs
for i := 0; i < count; i++ {
b := buf[i*procinfo_len : i*procinfo_len+procinfo_len]
k, err := parse_kinfo_proc(b)
if err != nil {
continue
}
p, err := newUnixProcess(int(k.Ki_pid))
if err != nil {
continue
}
p.ppid, p.pgrp, p.sid, p.binary = copy_params(&k)
results = append(results, p)
}
return results, nil
}
func parse_kinfo_proc(buf []byte) (Kinfo_proc, error) {
var k Kinfo_proc
br := bytes.NewReader(buf)
err := binary.Read(br, binary.LittleEndian, &k)
if err != nil {
return k, err
}
return k, nil
}
func call_syscall(mib []int32) ([]byte, uint64, error) {
miblen := uint64(len(mib))
// get required buffer size
length := uint64(0)
_, _, err := syscall.RawSyscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
uintptr(miblen),
0,
uintptr(unsafe.Pointer(&length)),
0,
0)
if err != 0 {
b := make([]byte, 0)
return b, length, err
}
if length == 0 {
b := make([]byte, 0)
return b, length, err
}
// get proc info itself
buf := make([]byte, length)
_, _, err = syscall.RawSyscall6(
syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])),
uintptr(miblen),
uintptr(unsafe.Pointer(&buf[0])),
uintptr(unsafe.Pointer(&length)),
0,
0)
if err != 0 {
return buf, length, err
}
return buf, length, nil
}
func newUnixProcess(pid int) (*UnixProcess, error) {
p := &UnixProcess{pid: pid}
return p, p.Refresh()
}

View File

@ -1,35 +0,0 @@
// +build linux
package ps
import (
"fmt"
"io/ioutil"
"strings"
)
// Refresh reloads all the data associated with this process.
func (p *UnixProcess) Refresh() error {
statPath := fmt.Sprintf("/proc/%d/stat", p.pid)
dataBytes, err := ioutil.ReadFile(statPath)
if err != nil {
return err
}
// First, parse out the image name
data := string(dataBytes)
binStart := strings.IndexRune(data, '(') + 1
binEnd := strings.IndexRune(data[binStart:], ')')
p.binary = data[binStart : binStart+binEnd]
// Move past the image name and start parsing the rest
data = data[binStart+binEnd+2:]
_, err = fmt.Sscanf(data,
"%c %d %d %d",
&p.state,
&p.ppid,
&p.pgrp,
&p.sid)
return err
}

View File

@ -1,96 +0,0 @@
// +build solaris
package ps
import (
"encoding/binary"
"fmt"
"os"
)
type ushort_t uint16
type id_t int32
type pid_t int32
type uid_t int32
type gid_t int32
type dev_t uint64
type size_t uint64
type uintptr_t uint64
type timestruc_t [16]byte
// This is copy from /usr/include/sys/procfs.h
type psinfo_t struct {
Pr_flag int32 /* process flags (DEPRECATED; do not use) */
Pr_nlwp int32 /* number of active lwps in the process */
Pr_pid pid_t /* unique process id */
Pr_ppid pid_t /* process id of parent */
Pr_pgid pid_t /* pid of process group leader */
Pr_sid pid_t /* session id */
Pr_uid uid_t /* real user id */
Pr_euid uid_t /* effective user id */
Pr_gid gid_t /* real group id */
Pr_egid gid_t /* effective group id */
Pr_addr uintptr_t /* address of process */
Pr_size size_t /* size of process image in Kbytes */
Pr_rssize size_t /* resident set size in Kbytes */
Pr_pad1 size_t
Pr_ttydev dev_t /* controlling tty device (or PRNODEV) */
// Guess this following 2 ushort_t values require a padding to properly
// align to the 64bit mark.
Pr_pctcpu ushort_t /* % of recent cpu time used by all lwps */
Pr_pctmem ushort_t /* % of system memory used by process */
Pr_pad64bit [4]byte
Pr_start timestruc_t /* process start time, from the epoch */
Pr_time timestruc_t /* usr+sys cpu time for this process */
Pr_ctime timestruc_t /* usr+sys cpu time for reaped children */
Pr_fname [16]byte /* name of execed file */
Pr_psargs [80]byte /* initial characters of arg list */
Pr_wstat int32 /* if zombie, the wait() status */
Pr_argc int32 /* initial argument count */
Pr_argv uintptr_t /* address of initial argument vector */
Pr_envp uintptr_t /* address of initial environment vector */
Pr_dmodel [1]byte /* data model of the process */
Pr_pad2 [3]byte
Pr_taskid id_t /* task id */
Pr_projid id_t /* project id */
Pr_nzomb int32 /* number of zombie lwps in the process */
Pr_poolid id_t /* pool id */
Pr_zoneid id_t /* zone id */
Pr_contract id_t /* process contract */
Pr_filler int32 /* reserved for future use */
Pr_lwp [128]byte /* information for representative lwp */
}
func (p *UnixProcess) Refresh() error {
var psinfo psinfo_t
path := fmt.Sprintf("/proc/%d/psinfo", p.pid)
fh, err := os.Open(path)
if err != nil {
return err
}
defer fh.Close()
err = binary.Read(fh, binary.LittleEndian, &psinfo)
if err != nil {
return err
}
p.ppid = int(psinfo.Pr_ppid)
p.binary = toString(psinfo.Pr_fname[:], 16)
return nil
}
func toString(array []byte, len int) string {
for i := 0; i < len; i++ {
if array[i] == 0 {
return string(array[:i])
}
}
return string(array[:])
}

View File

@ -1,101 +0,0 @@
// +build linux solaris
package ps
import (
"fmt"
"io"
"os"
"strconv"
)
// UnixProcess is an implementation of Process that contains Unix-specific
// fields and information.
type UnixProcess struct {
pid int
ppid int
state rune
pgrp int
sid int
binary string
}
func (p *UnixProcess) Pid() int {
return p.pid
}
func (p *UnixProcess) PPid() int {
return p.ppid
}
func (p *UnixProcess) Executable() string {
return p.binary
}
func findProcess(pid int) (Process, error) {
dir := fmt.Sprintf("/proc/%d", pid)
_, err := os.Stat(dir)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
return newUnixProcess(pid)
}
func processes() ([]Process, error) {
d, err := os.Open("/proc")
if err != nil {
return nil, err
}
defer d.Close()
results := make([]Process, 0, 50)
for {
fis, err := d.Readdir(10)
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
for _, fi := range fis {
// We only care about directories, since all pids are dirs
if !fi.IsDir() {
continue
}
// We only care if the name starts with a numeric
name := fi.Name()
if name[0] < '0' || name[0] > '9' {
continue
}
// From this point forward, any errors we just ignore, because
// it might simply be that the process doesn't exist anymore.
pid, err := strconv.ParseInt(name, 10, 0)
if err != nil {
continue
}
p, err := newUnixProcess(int(pid))
if err != nil {
continue
}
results = append(results, p)
}
}
return results, nil
}
func newUnixProcess(pid int) (*UnixProcess, error) {
p := &UnixProcess{pid: pid}
return p, p.Refresh()
}

View File

@ -1,119 +0,0 @@
// +build windows
package ps
import (
"fmt"
"syscall"
"unsafe"
)
// Windows API functions
var (
modKernel32 = syscall.NewLazyDLL("kernel32.dll")
procCloseHandle = modKernel32.NewProc("CloseHandle")
procCreateToolhelp32Snapshot = modKernel32.NewProc("CreateToolhelp32Snapshot")
procProcess32First = modKernel32.NewProc("Process32FirstW")
procProcess32Next = modKernel32.NewProc("Process32NextW")
)
// Some constants from the Windows API
const (
ERROR_NO_MORE_FILES = 0x12
MAX_PATH = 260
)
// PROCESSENTRY32 is the Windows API structure that contains a process's
// information.
type PROCESSENTRY32 struct {
Size uint32
CntUsage uint32
ProcessID uint32
DefaultHeapID uintptr
ModuleID uint32
CntThreads uint32
ParentProcessID uint32
PriorityClassBase int32
Flags uint32
ExeFile [MAX_PATH]uint16
}
// WindowsProcess is an implementation of Process for Windows.
type WindowsProcess struct {
pid int
ppid int
exe string
}
func (p *WindowsProcess) Pid() int {
return p.pid
}
func (p *WindowsProcess) PPid() int {
return p.ppid
}
func (p *WindowsProcess) Executable() string {
return p.exe
}
func newWindowsProcess(e *PROCESSENTRY32) *WindowsProcess {
// Find when the string ends for decoding
end := 0
for {
if e.ExeFile[end] == 0 {
break
}
end++
}
return &WindowsProcess{
pid: int(e.ProcessID),
ppid: int(e.ParentProcessID),
exe: syscall.UTF16ToString(e.ExeFile[:end]),
}
}
func findProcess(pid int) (Process, error) {
ps, err := processes()
if err != nil {
return nil, err
}
for _, p := range ps {
if p.Pid() == pid {
return p, nil
}
}
return nil, nil
}
func processes() ([]Process, error) {
handle, _, _ := procCreateToolhelp32Snapshot.Call(
0x00000002,
0)
if handle < 0 {
return nil, syscall.GetLastError()
}
defer procCloseHandle.Call(handle)
var entry PROCESSENTRY32
entry.Size = uint32(unsafe.Sizeof(entry))
ret, _, _ := procProcess32First.Call(handle, uintptr(unsafe.Pointer(&entry)))
if ret == 0 {
return nil, fmt.Errorf("Error retrieving process info.")
}
results := make([]Process, 0, 50)
for {
results = append(results, newWindowsProcess(&entry))
ret, _, _ := procProcess32Next.Call(handle, uintptr(unsafe.Pointer(&entry)))
if ret == 0 {
break
}
}
return results, nil
}

View File

@ -1,4 +0,0 @@
iso9660wrap
===========
This turns the [iso9660wrap](https://github.com/johto/iso9660wrap) utility into a package. It provides a simple means to create an ISO9660 file containing a single file.

View File

@ -1,66 +0,0 @@
package iso9660wrap
import (
"time"
)
func WriteDirectoryRecord(w *SectorWriter, identifier string, firstSectorNum uint32) uint32 {
if len(identifier) > 30 {
Panicf("directory identifier length %d is out of bounds", len(identifier))
}
recordLength := 33 + len(identifier)
w.WriteByte(byte(recordLength))
w.WriteByte(0) // number of sectors in extended attribute record
w.WriteBothEndianDWord(firstSectorNum)
w.WriteBothEndianDWord(SectorSize) // directory length
writeDirectoryRecordtimestamp(w, time.Now())
w.WriteByte(byte(3)) // bitfield; directory
w.WriteByte(byte(0)) // file unit size for an interleaved file
w.WriteByte(byte(0)) // interleave gap size for an interleaved file
w.WriteBothEndianWord(1) // volume sequence number
w.WriteByte(byte(len(identifier)))
w.WriteString(identifier)
// optional padding to even length
if recordLength%2 == 1 {
recordLength++
w.WriteByte(0)
}
return uint32(recordLength)
}
func WriteFileRecordHeader(w *SectorWriter, identifier string, firstSectorNum uint32, fileSize uint32) uint32 {
if len(identifier) > 30 {
Panicf("directory identifier length %d is out of bounds", len(identifier))
}
recordLength := 33 + len(identifier)
w.WriteByte(byte(recordLength))
w.WriteByte(0) // number of sectors in extended attribute record
w.WriteBothEndianDWord(firstSectorNum) // first sector
w.WriteBothEndianDWord(fileSize)
writeDirectoryRecordtimestamp(w, time.Now())
w.WriteByte(byte(0)) // bitfield; normal file
w.WriteByte(byte(0)) // file unit size for an interleaved file
w.WriteByte(byte(0)) // interleave gap size for an interleaved file
w.WriteBothEndianWord(1) // volume sequence number
w.WriteByte(byte(len(identifier)))
w.WriteString(identifier)
// optional padding to even length
if recordLength%2 == 1 {
recordLength++
w.WriteByte(0)
}
return uint32(recordLength)
}
func writeDirectoryRecordtimestamp(w *SectorWriter, t time.Time) {
t = t.UTC()
w.WriteByte(byte(t.Year() - 1900))
w.WriteByte(byte(t.Month()))
w.WriteByte(byte(t.Day()))
w.WriteByte(byte(t.Hour()))
w.WriteByte(byte(t.Minute()))
w.WriteByte(byte(t.Second()))
w.WriteByte(0) // UTC offset
}

View File

@ -1,148 +0,0 @@
package iso9660wrap
import (
"encoding/binary"
"io"
"math"
"strings"
"time"
)
const SectorSize uint32 = 2048
type SectorWriter struct {
w io.Writer
p uint32
}
func (w *SectorWriter) Write(p []byte) uint32 {
if len(p) >= math.MaxUint32 {
Panicf("attempted write of length %d is out of sector bounds", len(p))
}
l := uint32(len(p))
if l > w.RemainingSpace() {
Panicf("attempted write of length %d at offset %d is out of sector bounds", w.p, len(p))
}
w.p += l
_, err := w.w.Write(p)
if err != nil {
panic(err)
}
return l
}
func (w *SectorWriter) WriteUnspecifiedDateTime() uint32 {
b := make([]byte, 17)
for i := 0; i < 16; i++ {
b[i] = '0'
}
b[16] = 0
return w.Write(b)
}
func (w *SectorWriter) WriteDateTime(t time.Time) uint32 {
f := t.UTC().Format("20060102150405")
f += "00" // 1/100
f += "\x00" // UTC offset
if len(f) != 17 {
Panicf("date and time field %q is of unexpected length %d", f, len(f))
}
return w.WriteString(f)
}
func (w *SectorWriter) WriteString(str string) uint32 {
return w.Write([]byte(str))
}
func (w *SectorWriter) WritePaddedString(str string, length uint32) uint32 {
l := w.WriteString(str)
if l > 32 {
Panicf("padded string %q exceeds length %d", str, length)
} else if l < 32 {
w.WriteString(strings.Repeat(" ", int(32-l)))
}
return 32
}
func (w *SectorWriter) WriteByte(b byte) uint32 {
return w.Write([]byte{b})
}
func (w *SectorWriter) WriteWord(bo binary.ByteOrder, word uint16) uint32 {
b := make([]byte, 2)
bo.PutUint16(b, word)
return w.Write(b)
}
func (w *SectorWriter) WriteBothEndianWord(word uint16) uint32 {
w.WriteWord(binary.LittleEndian, word)
w.WriteWord(binary.BigEndian, word)
return 4
}
func (w *SectorWriter) WriteDWord(bo binary.ByteOrder, dword uint32) uint32 {
b := make([]byte, 4)
bo.PutUint32(b, dword)
return w.Write(b)
}
func (w *SectorWriter) WriteLittleEndianDWord(dword uint32) uint32 {
return w.WriteDWord(binary.LittleEndian, dword)
}
func (w *SectorWriter) WriteBigEndianDWord(dword uint32) uint32 {
return w.WriteDWord(binary.BigEndian, dword)
}
func (w *SectorWriter) WriteBothEndianDWord(dword uint32) uint32 {
w.WriteLittleEndianDWord(dword)
w.WriteBigEndianDWord(dword)
return 8
}
func (w *SectorWriter) WriteZeros(c int) uint32 {
return w.Write(make([]byte, c))
}
func (w *SectorWriter) PadWithZeros() uint32 {
return w.Write(make([]byte, w.RemainingSpace()))
}
func (w *SectorWriter) RemainingSpace() uint32 {
return SectorSize - w.p
}
func (w *SectorWriter) Reset() {
w.p = 0
}
type ISO9660Writer struct {
sw *SectorWriter
sectorNum uint32
}
func (w *ISO9660Writer) CurrentSector() uint32 {
return uint32(w.sectorNum)
}
func (w *ISO9660Writer) NextSector() *SectorWriter {
if w.sw.RemainingSpace() == SectorSize {
Panicf("internal error: tried to leave sector %d empty", w.sectorNum)
}
w.sw.PadWithZeros()
w.sw.Reset()
w.sectorNum++
return w.sw
}
func (w *ISO9660Writer) Finish() {
if w.sw.RemainingSpace() != SectorSize {
w.sw.PadWithZeros()
}
w.sw = nil
}
func NewISO9660Writer(w io.Writer) *ISO9660Writer {
// start at the end of the last reserved sector
return &ISO9660Writer{&SectorWriter{w, SectorSize}, 16 - 1}
}

View File

@ -1,367 +0,0 @@
package iso9660wrap
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"math"
"os"
"strings"
"time"
)
const reservedAreaData string = `
Once upon a midnight dreary, while I pondered, weak and weary,
Over many a quaint and curious volume of forgotten lore
While I nodded, nearly napping, suddenly there came a tapping,
As of some one gently rapping, rapping at my chamber door.
Tis some visitor, I muttered, tapping at my chamber door
Only this and nothing more.
Ah, distinctly I remember it was in the bleak December;
And each separate dying ember wrought its ghost upon the floor.
Eagerly I wished the morrow;vainly I had sought to borrow
From my books surcease of sorrowsorrow for the lost Lenore
For the rare and radiant maiden whom the angels name Lenore
Nameless here for evermore.
And the silken, sad, uncertain rustling of each purple curtain
Thrilled mefilled me with fantastic terrors never felt before;
So that now, to still the beating of my heart, I stood repeating
Tis some visitor entreating entrance at my chamber door
Some late visitor entreating entrance at my chamber door;
This it is and nothing more.
Presently my soul grew stronger; hesitating then no longer,
Sir, said I, or Madam, truly your forgiveness I implore;
But the fact is I was napping, and so gently you came rapping,
And so faintly you came tapping, tapping at my chamber door,
That I scarce was sure I heard youhere I opened wide the door;
Darkness there and nothing more.
Deep into that darkness peering, long I stood there wondering, fearing,
Doubting, dreaming dreams no mortal ever dared to dream before;
But the silence was unbroken, and the stillness gave no token,
And the only word there spoken was the whispered word, Lenore?
This I whispered, and an echo murmured back the word, Lenore!
Merely this and nothing more.
Back into the chamber turning, all my soul within me burning,
Soon again I heard a tapping somewhat louder than before.
Surely, said I, surely that is something at my window lattice;
Let me see, then, what thereat is, and this mystery explore
Let my heart be still a moment and this mystery explore;
Tis the wind and nothing more!
Open here I flung the shutter, when, with many a flirt and flutter,
In there stepped a stately Raven of the saintly days of yore;
Not the least obeisance made he; not a minute stopped or stayed he;
But, with mien of lord or lady, perched above my chamber door
Perched upon a bust of Pallas just above my chamber door
Perched, and sat, and nothing more.
Then this ebony bird beguiling my sad fancy into smiling,
By the grave and stern decorum of the countenance it wore,
Though thy crest be shorn and shaven, thou, I said, art sure no craven,
Ghastly grim and ancient Raven wandering from the Nightly shore
Tell me what thy lordly name is on the Nights Plutonian shore!
Quoth the Raven Nevermore.
Much I marvelled this ungainly fowl to hear discourse so plainly,
Though its answer little meaninglittle relevancy bore;
For we cannot help agreeing that no living human being
Ever yet was blessed with seeing bird above his chamber door
Bird or beast upon the sculptured bust above his chamber door,
With such name as Nevermore.
But the Raven, sitting lonely on the placid bust, spoke only
That one word, as if his soul in that one word he did outpour.
Nothing farther then he utterednot a feather then he fluttered
Till I scarcely more than muttered Other friends have flown before
On the morrow he will leave me, as my Hopes have flown before.
Then the bird said Nevermore.
Startled at the stillness broken by reply so aptly spoken,
Doubtless, said I, what it utters is its only stock and store
Caught from some unhappy master whom unmerciful Disaster
Followed fast and followed faster till his songs one burden bore
Till the dirges of his Hope that melancholy burden bore
Of Nevernevermore.
But the Raven still beguiling all my fancy into smiling,
Straight I wheeled a cushioned seat in front of bird, and bust and door;
Then, upon the velvet sinking, I betook myself to linking
Fancy unto fancy, thinking what this ominous bird of yore
What this grim, ungainly, ghastly, gaunt, and ominous bird of yore
Meant in croaking Nevermore.
This I sat engaged in guessing, but no syllable expressing
To the fowl whose fiery eyes now burned into my bosoms core;
This and more I sat divining, with my head at ease reclining
On the cushions velvet lining that the lamp-light gloated oer,
But whose velvet-violet lining with the lamp-light gloating oer,
She shall press, ah, nevermore!
Then, methought, the air grew denser, perfumed from an unseen censer
Swung by Seraphim whose foot-falls tinkled on the tufted floor.
Wretch, I cried, thy God hath lent theeby these angels he hath sent thee
Respiterespite and nepenthe from thy memories of Lenore;
Quaff, oh quaff this kind nepenthe and forget this lost Lenore!
Quoth the Raven Nevermore.
Prophet! said I, thing of evil!prophet still, if bird or devil!
Whether Tempter sent, or whether tempest tossed thee here ashore,
Desolate yet all undaunted, on this desert land enchanted
On this home by Horror hauntedtell me truly, I implore
Is thereis there balm in Gilead?tell metell me, I implore!
Quoth the Raven Nevermore.
Prophet! said I, thing of evil!prophet still, if bird or devil!
By that Heaven that bends above usby that God we both adore
Tell this soul with sorrow laden if, within the distant Aidenn,
It shall clasp a sainted maiden whom the angels name Lenore
Clasp a rare and radiant maiden whom the angels name Lenore.
Quoth the Raven Nevermore.
Be that word our sign of parting, bird or fiend! I shrieked, upstarting
Get thee back into the tempest and the Nights Plutonian shore!
Leave no black plume as a token of that lie thy soul hath spoken!
Leave my loneliness unbroken!quit the bust above my door!
Take thy beak from out my heart, and take thy form from off my door!
Quoth the Raven Nevermore.
And the Raven, never flitting, still is sitting, still is sitting
On the pallid bust of Pallas just above my chamber door;
And his eyes have all the seeming of a demons that is dreaming,
And the lamp-light oer him streaming throws his shadow on the floor;
And my soul from out that shadow that lies floating on the floor
Shall be liftednevermore!
`
func Panicf(format string, v ...interface{}) {
panic(fmt.Errorf(format, v...))
}
const volumeDescriptorSetMagic = "\x43\x44\x30\x30\x31\x01"
const primaryVolumeSectorNum uint32 = 16
const numVolumeSectors uint32 = 2 // primary + terminator
const littleEndianPathTableSectorNum uint32 = primaryVolumeSectorNum + numVolumeSectors
const bigEndianPathTableSectorNum uint32 = littleEndianPathTableSectorNum + 1
const numPathTableSectors = 2 // no secondaries
const rootDirectorySectorNum uint32 = primaryVolumeSectorNum + numVolumeSectors + numPathTableSectors
// WriteFile writes the contents of infh to an iso at outfh with the name provided
func WriteFile(outfh, infh *os.File) error {
inputFileSize, inputFilename, err := getInputFileSizeAndName(infh)
if err != nil {
return err
}
if inputFileSize == 0 {
return fmt.Errorf("input file must be at least 1 byte in size")
}
inputFilename = strings.ToUpper(inputFilename)
if !filenameSatisfiesISOConstraints(inputFilename) {
return fmt.Errorf("Input file name %s does not satisfy the ISO9660 character set constraints", inputFilename)
}
// reserved sectors
reservedAreaLength := int64(16 * SectorSize)
_, err = outfh.Write([]byte(reservedAreaData))
if err != nil {
return fmt.Errorf("could not write to output file: %s", err)
}
err = outfh.Truncate(reservedAreaLength)
if err != nil {
return fmt.Errorf("could not truncate output file: %s", err)
}
_, err = outfh.Seek(reservedAreaLength, os.SEEK_SET)
if err != nil {
return fmt.Errorf("could not seek output file: %s", err)
}
err = nil
func() {
defer func() {
var ok bool
e := recover()
if e != nil {
err, ok = e.(error)
if !ok {
panic(e)
}
}
}()
bufw := bufio.NewWriter(outfh)
w := NewISO9660Writer(bufw)
writePrimaryVolumeDescriptor(w, inputFileSize, inputFilename)
writeVolumeDescriptorSetTerminator(w)
writePathTable(w, binary.LittleEndian)
writePathTable(w, binary.BigEndian)
writeData(w, infh, inputFileSize, inputFilename)
w.Finish()
err := bufw.Flush()
if err != nil {
panic(err)
}
}()
if err != nil {
return fmt.Errorf("could not write to output file: %s", err)
}
return nil
}
func writePrimaryVolumeDescriptor(w *ISO9660Writer, inputFileSize uint32, inputFilename string) {
if len(inputFilename) > 32 {
inputFilename = inputFilename[:32]
}
now := time.Now()
sw := w.NextSector()
if w.CurrentSector() != primaryVolumeSectorNum {
Panicf("internal error: unexpected primary volume sector %d", w.CurrentSector())
}
sw.WriteByte('\x01')
sw.WriteString(volumeDescriptorSetMagic)
sw.WriteByte('\x00')
sw.WritePaddedString("", 32)
sw.WritePaddedString(inputFilename, 32)
sw.WriteZeros(8)
sw.WriteBothEndianDWord(numTotalSectors(inputFileSize))
sw.WriteZeros(32)
sw.WriteBothEndianWord(1) // volume set size
sw.WriteBothEndianWord(1) // volume sequence number
sw.WriteBothEndianWord(uint16(SectorSize))
sw.WriteBothEndianDWord(SectorSize) // path table length
sw.WriteLittleEndianDWord(littleEndianPathTableSectorNum)
sw.WriteLittleEndianDWord(0) // no secondary path tables
sw.WriteBigEndianDWord(bigEndianPathTableSectorNum)
sw.WriteBigEndianDWord(0) // no secondary path tables
WriteDirectoryRecord(sw, "\x00", rootDirectorySectorNum) // root directory
sw.WritePaddedString("", 128) // volume set identifier
sw.WritePaddedString("", 128) // publisher identifier
sw.WritePaddedString("", 128) // data preparer identifier
sw.WritePaddedString("", 128) // application identifier
sw.WritePaddedString("", 37) // copyright file identifier
sw.WritePaddedString("", 37) // abstract file identifier
sw.WritePaddedString("", 37) // bibliographical file identifier
sw.WriteDateTime(now) // volume creation
sw.WriteDateTime(now) // most recent modification
sw.WriteUnspecifiedDateTime() // expires
sw.WriteUnspecifiedDateTime() // is effective (?)
sw.WriteByte('\x01') // version
sw.WriteByte('\x00') // reserved
sw.PadWithZeros() // 512 (reserved for app) + 653 (zeros)
}
func writeVolumeDescriptorSetTerminator(w *ISO9660Writer) {
sw := w.NextSector()
if w.CurrentSector() != primaryVolumeSectorNum+1 {
Panicf("internal error: unexpected volume descriptor set terminator sector %d", w.CurrentSector())
}
sw.WriteByte('\xFF')
sw.WriteString(volumeDescriptorSetMagic)
sw.PadWithZeros()
}
func writePathTable(w *ISO9660Writer, bo binary.ByteOrder) {
sw := w.NextSector()
sw.WriteByte(1) // name length
sw.WriteByte(0) // number of sectors in extended attribute record
sw.WriteDWord(bo, rootDirectorySectorNum)
sw.WriteWord(bo, 1) // parent directory recno (root directory)
sw.WriteByte(0) // identifier (root directory)
sw.WriteByte(1) // padding
sw.PadWithZeros()
}
func writeData(w *ISO9660Writer, infh io.Reader, inputFileSize uint32, inputFilename string) {
sw := w.NextSector()
if w.CurrentSector() != rootDirectorySectorNum {
Panicf("internal error: unexpected root directory sector %d", w.CurrentSector())
}
WriteDirectoryRecord(sw, "\x00", w.CurrentSector())
WriteDirectoryRecord(sw, "\x01", rootDirectorySectorNum)
WriteFileRecordHeader(sw, inputFilename, w.CurrentSector()+1, inputFileSize)
// Now stream the data. Note that the first buffer is never of SectorSize,
// since we've already filled a part of the sector.
b := make([]byte, SectorSize)
total := uint32(0)
for {
l, err := infh.Read(b)
if err != nil && err != io.EOF {
Panicf("could not read from input file: %s", err)
}
if l > 0 {
sw = w.NextSector()
sw.Write(b[:l])
total += uint32(l)
}
if err == io.EOF {
break
}
}
if total != inputFileSize {
Panicf("input file size changed while the ISO file was being created (expected to read %d, read %d)", inputFileSize, total)
} else if w.CurrentSector() != numTotalSectors(inputFileSize)-1 {
Panicf("internal error: unexpected last sector number (expected %d, actual %d)",
numTotalSectors(inputFileSize)-1, w.CurrentSector())
}
}
func numTotalSectors(inputFileSize uint32) uint32 {
var numDataSectors uint32
numDataSectors = (inputFileSize + (SectorSize - 1)) / SectorSize
return 1 + rootDirectorySectorNum + numDataSectors
}
func getInputFileSizeAndName(fh *os.File) (uint32, string, error) {
fi, err := fh.Stat()
if err != nil {
return 0, "", err
}
if fi.Size() >= math.MaxUint32 {
return 0, "", fmt.Errorf("file size %d is too large", fi.Size())
}
return uint32(fi.Size()), fi.Name(), nil
}
func filenameSatisfiesISOConstraints(filename string) bool {
invalidCharacter := func(r rune) bool {
// According to ISO9660, only capital letters, digits, and underscores
// are permitted. Some sources say a dot is allowed as well. I'm too
// lazy to figure it out right now.
if r >= 'A' && r <= 'Z' {
return false
} else if r >= '0' && r <= '9' {
return false
} else if r == '_' {
return false
} else if r == '.' {
return false
}
return true
}
return strings.IndexFunc(filename, invalidCharacter) == -1
}
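
For reference, a minimal sketch (not part of this changeset) of how the package above is typically driven. The file names are hypothetical; the import path is the vendored `github.com/rneugeba/iso9660wrap` dependency listed in vendor.conf.

```go
package main

import (
	"log"
	"os"

	"github.com/rneugeba/iso9660wrap"
)

func main() {
	// Hypothetical paths: wrap a single metadata file into a small ISO image.
	in, err := os.Open("config.json")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("config.iso")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// The file name is uppercased and must pass the ISO9660 character checks above.
	if err := iso9660wrap.WriteFile(out, in); err != nil {
		log.Fatal(err)
	}
}
```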

@ -1 +0,0 @@
Subproject commit b061729afc07e77a8aa4fad0a2fd840958f1942a

@ -1 +0,0 @@
Subproject commit 92ea23a837e66f46ac9e7d04fa826602b7b0a42d

@ -1 +0,0 @@
Subproject commit 9ff6c6923cfffbcd502984b8e0c80539a94968b7

View File

@ -1,6 +1,17 @@
github.com/docker/hyperkit/go 874e68dbb7a2a7a2794dbd8648c2f4be1e7a8bb3
github.com/googleapis/gax-go 8c5154c0fe5bf18cf649634d4c6df50897a32751
github.com/golang/protobuf/proto c9c7427a2a70d2eb3bafa0ab2dc163e45f143317
github.com/Masterminds/semver 312afcd0e81e5cf81fdc3cfd0e8504ae031521c8
github.com/Masterminds/sprig 01a849f546a584d7b29bfee253e7db0aed44f7ba
github.com/Sirupsen/logrus 10f801ebc38b33738c9d17d50860f484a0988ff5
github.com/aokoli/goutils 9c37978a95bd5c709a15883b6242714ea6709e64
github.com/armon/go-radix 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
github.com/docker/infrakit 208d114478ed94ee9015083e13946ca1caaad790
github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/gorilla/rpc 22c016f3df3febe0c1f6727598b6389507e03a18
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/mitchellh/go-ps 4fdf99ab29366514c69ccccddab5dc58b8d84062
github.com/rneugeba/iso9660wrap 9c7eaf5ac74b2416be8b7b8d1f35b9af44a6e4fa
github.com/surma/gocpio fcb68777e7dc4ea43ffce871b552c0d073c17495
@ -10,3 +21,9 @@ golang.org/x/oauth2 1611bb46e67abc64a71ecc5c3ae67f1cbbc2b921
google.golang.org/api 1202890e803f07684581b575fda809bf335a533f
google.golang.org/grpc 0713829b980f4ddd276689a36235c5fcc82a21bf
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
github.com/satori/go.uuid b061729afc07e77a8aa4fad0a2fd840958f1942a
github.com/spf13/cobra 7be4beda01ec05d0b93d80b3facd2b6f44080d94
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
golang.org/x/crypto 459e26527287adbc2adcc5d0d49abff9a5f315a7
golang.org/x/sys 99f16d856c9836c42d24e7ab64ea72916925fa97
gopkg.in/tylerb/graceful.v1 4654dfbb6ad53cb5e27f37d99b02e16c1872fbbb

View File

@ -1,6 +1,5 @@
The MIT License (MIT)
Copyright (c) 2015 Marko Tiikkaja
Sprig
Copyright (C) 2013 Masterminds
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -9,13 +8,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

311
vendor/github.com/Masterminds/sprig/README.md generated vendored Normal file
View File

@ -0,0 +1,311 @@
# Sprig: Template functions for Go templates
The Go language comes with a [built-in template
language](http://golang.org/pkg/text/template/), but not
very many template functions. This library provides a group of commonly
used template functions.
It is inspired by the template functions found in
[Twig](http://twig.sensiolabs.org/documentation).
[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig)
## Usage
API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig), but
read on for standard usage.
### Load the Sprig library
To load the Sprig `FuncMap`:
```go
import (
"github.com/Masterminds/sprig"
"html/template"
)
// This example illustrates that the FuncMap *must* be set before the
// templates themselves are loaded.
tpl := template.Must(
template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
)
```
### Call the functions inside of templates
By convention, all functions are lowercase. This seems to follow the Go
idiom for template functions (as opposed to template methods, which are
TitleCase).
Example:
```
{{ "hello!" | upper | repeat 5 }}
```
Produces:
```
HELLO!HELLO!HELLO!HELLO!HELLO!
```
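For completeness, a minimal, hypothetical program (not part of this repository) wiring the pipeline above into `text/template`; it assumes the vendored `github.com/Masterminds/sprig` package:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Attach the Sprig function map before parsing, then run the pipeline above.
	tpl := template.Must(
		template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(`{{ "hello!" | upper | repeat 5 }}`),
	)
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: HELLO!HELLO!HELLO!HELLO!HELLO!
}
```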
## Functions
### Date Functions
- date: Format a date, where a date is an integer type or a time.Time type, and
format is a time.Format formatting string.
- dateModify: Given a date, modify it with a duration: `date_modify "-1.5h" now`. If the duration doesn't
parse, it returns the time unaltered. See `time.ParseDuration` for info on duration strings.
- now: Current time.Time, for feeding into date-related functions.
- htmlDate: Format a date for use in the value field of an HTML "date" form element.
- dateInZone: Like date, but takes three arguments: format, timestamp,
timezone.
- htmlDateInZone: Like htmlDate, but takes two arguments: timestamp,
timezone.
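For illustration, a small hypothetical snippet exercising a few of the date functions above:
```
{{ now | date "2006-01-02" }}
{{ dateInZone "15:04" now "UTC" }}
{{ now | htmlDate }}
```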
### String Functions
- trim: strings.TrimSpace
- trimAll: strings.Trim, but with the argument order reversed `trimAll "$" "$5.00"` or `"$5.00" | trimAll "$"`
- trimSuffix: strings.TrimSuffix, but with the argument order reversed `trimSuffix "-" "5-"`
- trimPrefix: strings.TrimPrefix, but with the argument order reversed `trimPrefix "$" "$5"`
- upper: strings.ToUpper
- lower: strings.ToLower
- title: strings.Title
- repeat: strings.Repeat, but with the arguments switched: `repeat count str`. (This simplifies common pipelines)
- substr: Given string, start, and length, return a substr.
- nospace: Remove all spaces from a string. `h e l l o` becomes
`hello`.
- abbrev: Truncate a string with ellipses
- trunc: Truncate a string (no suffix). `trunc 5 "Hello World"` yields "Hello".
- abbrevboth: Truncate both sides of a string with ellipses
- untitle: Remove title case
- initials: Given multiple words, return the first letter of each
word
- randAlphaNum: Generate a random alpha-numeric string
- randAlpha: Generate a random alphabetic string
- randAscii: Generate a random ASCII string, including symbols
- randNumeric: Generate a random numeric string
- wrap: Wrap text at the given column count
- wrapWith: Wrap text at the given column count, and with the given
string for a line terminator: `wrap 50 "\n\t" $string`
- contains: strings.Contains, but with the arguments switched: `contains "cat" "uncatch"`. (This simplifies common pipelines)
- hasPrefix: strings.HasPrefix, but with the arguments switched: `hasPrefix "cat" "catch"`.
- hasSuffix: strings.HasSuffix, but with the arguments switched: `hasSuffix "cat" "ducat"`.
- quote: Wrap strings in double quotes. `quote "a" "b"` returns `"a"
"b"`
- squote: Wrap strings in single quotes.
- cat: Concatenate strings, separating them by spaces. `cat $a $b $c`.
- indent: Indent a string using space characters. `indent 4 "foo\nbar"` produces " foo\n bar"
- replace: Replace an old with a new in a string: `$name | replace " " "-"`
- plural: Choose singular or plural based on length: `len $fish | plural
"one anchovy" "many anchovies"`
- uuidv4: Generate a UUID v4 string
- sha256sum: Generate a hex encoded sha256 hash of the input
- toString: Convert something to a string
### String Slice Functions:
- join: strings.Join, but as `join SEP SLICE`
- split: strings.Split, but as `split SEP STRING`. The results are returned
as a map with the indexes set to _N, where N is an integer starting from 0.
Use it like this: `{{$v := "foo/bar/baz" | split "/"}}{{$v._0}}` (Prints `foo`)
- splitList: strings.Split, but as `split SEP STRING`. The results are returned
as an array.
- toStrings: convert a list to a list of strings. 'list 1 2 3 | toStrings' produces '["1" "2" "3"]'
- sortAlpha: sort a list lexicographically.
### Integer Slice Functions:
- until: Given an integer, returns a slice of counting integers from 0 to one
less than the given integer: `range $i, $e := until 5`
- untilStep: Given start, stop, and step, return an integer slice starting at
'start', stopping at `stop`, and incrementing by 'step'. This is the same
as Python's long-form of 'range'.
### Conversions:
- atoi: Convert a string to an integer. 0 if the integer could not be parsed.
- int: Convert a string or numeric to an int
- int64: Convert a string or numeric to an int64
- float64: Convert a string or numeric to a float64
### Defaults:
- default: Give a default value. Used like this: {{trim " "| default "empty"}}.
Since trim produces an empty string, the default value is returned. For
things with a length (strings, slices, maps), len(0) will trigger the default.
For numbers, the value 0 will trigger the default. For booleans, false will
trigger the default. For structs, the default is never returned (there is
no clear empty condition). For everything else, nil value triggers a default.
- empty: Returns true if the given value is the zero value for that
type. Structs are always non-empty.
- coalesce: Given a list of items, return the first non-empty one.
This follows the same rules as 'empty'. `{{ coalesce .someVal 0 "hello" }}`
will return `.someVal` if set, or else return "hello". The 0 is skipped
because it is an empty value.
- compact: Return a copy of a list with all of the empty values removed.
`list 0 1 2 "" | compact` will return `[1 2]`
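A short illustrative snippet (all values hypothetical):
```
{{ trim "   " | default "empty" }}    {{/* prints "empty" */}}
{{ coalesce "" 0 false "hello" }}     {{/* prints "hello" */}}
{{ list 0 1 2 "" | compact }}         {{/* prints [1 2] */}}
```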
### OS:
- env: Read an environment variable.
- expandenv: Expand all environment variables in a string.
### File Paths:
- base: Return the last element of a path. https://golang.org/pkg/path#Base
- dir: Remove the last element of a path. https://golang.org/pkg/path#Dir
- clean: Clean a path to the shortest equivalent name. (e.g. remove "foo/.."
from "foo/../bar.html") https://golang.org/pkg/path#Clean
- ext: Get the extension for a file path: https://golang.org/pkg/path#Ext
- isAbs: Returns true if a path is absolute: https://golang.org/pkg/path#IsAbs
### Encoding:
- b32enc: Encode a string into a Base32 string
- b32dec: Decode a string from a Base32 string
- b64enc: Encode a string into a Base64 string
- b64dec: Decode a string from a Base64 string
### Data Structures:
- tuple: Takes an arbitrary list of items and returns a slice of items. Its
tuple-ish properties are mainly gained through the template idiom, and not
through an API provided here. WARNING: The implementation of tuple will
change in the future.
- list: An arbitrary ordered list of items. (This is preferred over tuple.)
- dict: Takes a list of name/values and returns a map[string]interface{}.
The first parameter is converted to a string and stored as a key, the
second parameter is treated as the value. And so on, with odds as keys and
evens as values. If the function call ends with an odd, the last key will
be assigned the empty string. Non-string keys are converted to strings as
follows: []byte are converted, fmt.Stringers will have String() called.
errors will have Error() called. All others will be passed through
fmt.Sprtinf("%v"). _dicts are unordered_.
List:
```
{{$t := list 1 "a" "foo"}}
{{index $t 2}}{{index $t 0 }}{{index $t 1}}
{{/* Prints foo1a */}}
```
Dict:
```
{{ $t := dict "key1" "value1" "key2" "value2" }}
{{ $t.key2 }}
{{ /* Prints value2 */}}
```
### Lists Functions:
These are used to manipulate lists: `{{ list 1 2 3 | reverse | first }}`
- first: Get the first item in a 'list'. 'list 1 2 3 | first' prints '1'
- last: Get the last item in a 'list': 'list 1 2 3 | last ' prints '3'
- rest: Get all but the first item in a list: 'list 1 2 3 | rest' returns '[2 3]'
- initial: Get all but the last item in a list: 'list 1 2 3 | initial' returns '[1 2]'
- append: Add an item to the end of a list: 'append $list 4' adds '4' to the end of '$list'
- prepend: Add an item to the beginning of a list: 'prepend $list 4' puts 4 at the beginning of the list.
- reverse: Reverse the items in a list.
- uniq: Remove duplicates from a list.
- without: Return a list with the given values removed: 'without (list 1 2 3) 1' would return '[2 3]'
- has: Return 'true' if the item is found in the list: 'has "foo" $list' will return 'true' if the list contains "foo"
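For example (all values hypothetical):
```
{{ list 1 2 3 4 | reverse }}          {{/* [4 3 2 1] */}}
{{ list 1 2 3 | rest }}               {{/* [2 3] */}}
{{ without (list 1 2 3 2) 2 }}        {{/* [1 3] */}}
{{ has "foo" (list "foo" "bar") }}    {{/* true */}}
```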
### Dict Functions:
These are used to manipulate dicts.
- set: Takes a dict, a key, and a value, and sets that key/value pair in
the dict. `set $dict $key $value`. For convenience, it returns the dict,
even though the dict was modified in place.
- unset: Takes a dict and a key, and deletes that key/value pair from the
dict. `unset $dict $key`. This returns the dict for convenience.
- hasKey: Takes a dict and a key, and returns boolean true if the key is in
the dict.
- pluck: Given a key and one or more maps, get all of the values for that key.
- keys: Get an array of all of the keys in a dict. Order is not guaranteed.
- pick: Select just the given keys out of the dict, and return a new dict.
- omit: Return a dict without the given keys.
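A hypothetical snippet combining a few of these:
```
{{ $d := dict "name" "moby" "kernel" "4.9" }}
{{ hasKey $d "name" }}            {{/* true */}}
{{ pluck "name" $d }}             {{/* [moby] */}}
{{ keys (pick $d "kernel") }}     {{/* [kernel] */}}
```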
### Reflection:
- typeOf: Takes an interface and returns a string representation of the type.
For pointers, this will return a type prefixed with an asterisk(`*`). So
a pointer to type `Foo` will be `*Foo`.
- typeIs: Compares an interface with a string name, and returns true if they match.
Note that a pointer will not match a reference. For example `*Foo` will not
match `Foo`.
- typeIsLike: returns true if the interface is of the given type, or
is a pointer to the given type.
- kindOf: Takes an interface and returns a string representation of its kind.
- kindIs: Returns true if the given string matches the kind of the given interface.
Note: None of these can test whether or not something implements a given
interface, since doing so would require compiling the interface in ahead of
time.
### Math Functions:
Integer functions will convert integers of any width to `int64`. If a
string is passed in, functions will attempt to convert with
`strconv.ParseInt(s, 10, 64)`. If this fails, the value will be treated as 0.
- add1: Increment an integer by 1
- add: Sum integers. `add 1 2 3` renders `6`
- sub: Subtract the second integer from the first
- div: Divide the first integer by the second
- mod: Modulo of the first integer divided by the second
- mul: Multiply integers
- max (biggest): Return the biggest of a series of integers. `max 1 2 3`
returns `3`.
- min: Return the smallest of a series of integers. `min 1 2 3` returns
`1`.
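For example (all values illustrative):
```
{{ add 1 2 3 }}     {{/* 6 */}}
{{ div 10 3 }}      {{/* 3 (integer division) */}}
{{ max 1 7 3 }}     {{/* 7 */}}
{{ add1 "41" }}     {{/* 42; numeric strings are parsed as integers */}}
```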
### Cryptographic Functions:
- derivePassword: Derive a password from the given parameters according to the "Master Password" algorithm (http://masterpasswordapp.com/algorithm.html)
Given parameters (in order) are:
`counter` (starting with 1), `password_type` (maximum, long, medium, short, basic, or pin), `password`,
`user`, and `site`. The following line generates a long password for the user "user" and with a master-password "password" on the site "example.com":
```
{{ derivePassword 1 "long" "password" "user" "example.com" }}
```
## SemVer Functions:
These functions provide version parsing and comparisons for SemVer 2 version
strings.
- semver: Parse a semantic version and return a Version object.
- semverCompare: Compare a SemVer range to a particular version.
## Principles:
The following principles were used in deciding on which functions to add, and
determining how to implement them.
- Template functions should be used to build layout. Therefore, the following
types of operations are within the domain of template functions:
- Formatting
- Layout
- Simple type conversions
- Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
- Template functions should not return errors unless there is no way to print
a sensible value. For example, converting a string to an integer should not
produce an error if conversion fails. Instead, it should display a default
value that can be displayed.
- Simple math is necessary for grid layouts, pagers, and so on. Complex math
(anything other than arithmetic) should be done outside of templates.
- Template functions only deal with the data passed into them. They never retrieve
data from a source.
- Finally, do not override core Go template functions.

148
vendor/github.com/Masterminds/sprig/crypto.go generated vendored Normal file
View File

@ -0,0 +1,148 @@
package sprig
import (
"bytes"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"encoding/pem"
"fmt"
"math/big"
uuid "github.com/satori/go.uuid"
"golang.org/x/crypto/scrypt"
)
func sha256sum(input string) string {
hash := sha256.Sum256([]byte(input))
return hex.EncodeToString(hash[:])
}
// uuidv4 provides a safe and secure UUID v4 implementation
func uuidv4() string {
return fmt.Sprintf("%s", uuid.NewV4())
}
var master_password_seed = "com.lyndir.masterpassword"
var password_type_templates = map[string][][]byte{
"maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")},
"long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"),
[]byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"),
[]byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"),
[]byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"),
[]byte("CvccCvcvCvccno")},
"medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")},
"short": {[]byte("Cvcn")},
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")},
"pin": {[]byte("nnnn")},
}
var template_characters = map[byte]string{
'V': "AEIOU",
'C': "BCDFGHJKLMNPQRSTVWXYZ",
'v': "aeiou",
'c': "bcdfghjklmnpqrstvwxyz",
'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ",
'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz",
'n': "0123456789",
'o': "@&%?,=[]_:-+*$#!'^~;()/.",
'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()",
}
func derivePassword(counter uint32, password_type, password, user, site string) string {
var templates = password_type_templates[password_type]
if templates == nil {
return fmt.Sprintf("cannot find password template %s", password_type)
}
var buffer bytes.Buffer
buffer.WriteString(master_password_seed)
binary.Write(&buffer, binary.BigEndian, uint32(len(user)))
buffer.WriteString(user)
salt := buffer.Bytes()
key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64)
if err != nil {
return fmt.Sprintf("failed to derive password: %s", err)
}
buffer.Truncate(len(master_password_seed))
binary.Write(&buffer, binary.BigEndian, uint32(len(site)))
buffer.WriteString(site)
binary.Write(&buffer, binary.BigEndian, counter)
var hmacv = hmac.New(sha256.New, key)
hmacv.Write(buffer.Bytes())
var seed = hmacv.Sum(nil)
var temp = templates[int(seed[0])%len(templates)]
buffer.Truncate(0)
for i, element := range temp {
pass_chars := template_characters[element]
pass_char := pass_chars[int(seed[i+1])%len(pass_chars)]
buffer.WriteByte(pass_char)
}
return buffer.String()
}
func generatePrivateKey(typ string) string {
var priv interface{}
var err error
switch typ {
case "", "rsa":
// good enough for government work
priv, err = rsa.GenerateKey(rand.Reader, 4096)
case "dsa":
key := new(dsa.PrivateKey)
// again, good enough for government work
if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil {
return fmt.Sprintf("failed to generate dsa params: %s", err)
}
err = dsa.GenerateKey(key, rand.Reader)
priv = key
case "ecdsa":
// again, good enough for government work
priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
default:
return "Unknown type " + typ
}
if err != nil {
return fmt.Sprintf("failed to generate private key: %s", err)
}
return string(pem.EncodeToMemory(pemBlockForKey(priv)))
}
type DSAKeyFormat struct {
Version int
P, Q, G, Y, X *big.Int
}
func pemBlockForKey(priv interface{}) *pem.Block {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
case *dsa.PrivateKey:
val := DSAKeyFormat{
P: k.P, Q: k.Q, G: k.G,
Y: k.Y, X: k.X,
}
bytes, _ := asn1.Marshal(val)
return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes}
case *ecdsa.PrivateKey:
b, _ := x509.MarshalECPrivateKey(k)
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
default:
return nil
}
}
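
Since these helpers are unexported, they are normally reached through the template function map; the following hypothetical in-package sketch (the example function is not part of Sprig) illustrates their string-oriented behaviour:

```go
package sprig

import (
	"fmt"
	"strings"
)

// exampleCrypto is an illustrative, hypothetical helper (not part of Sprig)
// showing what the functions above return.
func exampleCrypto() {
	// generatePrivateKey returns a PEM-encoded key (or an error message) as a string.
	pemStr := generatePrivateKey("ecdsa")
	fmt.Println(strings.Contains(pemStr, "EC PRIVATE KEY")) // true

	// derivePassword is deterministic for a fixed set of inputs.
	fmt.Println(derivePassword(1, "long", "password", "user", "example.com"))
}
```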

53
vendor/github.com/Masterminds/sprig/date.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
package sprig
import (
"time"
)
// Given a format and a date, format the date string.
//
// Date can be a `time.Time` or an `int, int32, int64`.
// In the latter case, it is treated as seconds since UNIX
// epoch.
func date(fmt string, date interface{}) string {
return dateInZone(fmt, date, "Local")
}
func htmlDate(date interface{}) string {
return dateInZone("2006-01-02", date, "Local")
}
func htmlDateInZone(date interface{}, zone string) string {
return dateInZone("2006-01-02", date, zone)
}
func dateInZone(fmt string, date interface{}, zone string) string {
var t time.Time
switch date := date.(type) {
default:
t = time.Now()
case time.Time:
t = date
case int64:
t = time.Unix(date, 0)
case int:
t = time.Unix(int64(date), 0)
case int32:
t = time.Unix(int64(date), 0)
}
loc, err := time.LoadLocation(zone)
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format(fmt)
}
func dateModify(fmt string, date time.Time) time.Time {
d, err := time.ParseDuration(fmt)
if err != nil {
return date
}
return date.Add(d)
}

62
vendor/github.com/Masterminds/sprig/defaults.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
package sprig
import (
"reflect"
)
// dfault checks whether `given` is set, and returns default if not set.
//
// This returns `d` if `given` appears not to be set, and `given` otherwise.
//
// For numeric types 0 is unset.
// For strings, maps, arrays, and slices, len() = 0 is considered unset.
// For bool, false is unset.
// Structs are never considered unset.
//
// For everything else, including pointers, a nil value is unset.
func dfault(d interface{}, given ...interface{}) interface{} {
if empty(given) || empty(given[0]) {
return d
}
return given[0]
}
// empty returns true if the given value has the zero value for its type.
func empty(given interface{}) bool {
g := reflect.ValueOf(given)
if !g.IsValid() {
return true
}
// Basically adapted from text/template.isTrue
switch g.Kind() {
default:
return g.IsNil()
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return g.Len() == 0
case reflect.Bool:
return g.Bool() == false
case reflect.Complex64, reflect.Complex128:
return g.Complex() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return g.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return g.Uint() == 0
case reflect.Float32, reflect.Float64:
return g.Float() == 0
case reflect.Struct:
return false
}
return true
}
// coalesce returns the first non-empty value.
func coalesce(v ...interface{}) interface{} {
for _, val := range v {
if !empty(val) {
return val
}
}
return nil
}

74
vendor/github.com/Masterminds/sprig/dict.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package sprig
func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
d[key] = value
return d
}
func unset(d map[string]interface{}, key string) map[string]interface{} {
delete(d, key)
return d
}
func hasKey(d map[string]interface{}, key string) bool {
_, ok := d[key]
return ok
}
func pluck(key string, d ...map[string]interface{}) []interface{} {
res := []interface{}{}
for _, dict := range d {
if val, ok := dict[key]; ok {
res = append(res, val)
}
}
return res
}
func keys(dict map[string]interface{}) []string {
k := []string{}
for key := range dict {
k = append(k, key)
}
return k
}
func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
res := map[string]interface{}{}
for _, k := range keys {
if v, ok := dict[k]; ok {
res[k] = v
}
}
return res
}
func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
res := map[string]interface{}{}
omit := make(map[string]bool, len(keys))
for _, k := range keys {
omit[k] = true
}
for k, v := range dict {
if _, ok := omit[k]; !ok {
res[k] = v
}
}
return res
}
func dict(v ...interface{}) map[string]interface{} {
dict := map[string]interface{}{}
lenv := len(v)
for i := 0; i < lenv; i += 2 {
key := strval(v[i])
if i+1 >= lenv {
dict[key] = ""
continue
}
dict[key] = v[i+1]
}
return dict
}

225
vendor/github.com/Masterminds/sprig/doc.go generated vendored Normal file
View File

@ -0,0 +1,225 @@
/*
Sprig: Template functions for Go.
This package contains a number of utility functions for working with data
inside of Go `html/template` and `text/template` files.
To add these functions, use the `template.Funcs()` method:
t := templates.New("foo").Funcs(sprig.FuncMap())
Note that you should add the function map before you parse any template files.
In several cases, Sprig reverses the order of arguments from the way they
appear in the standard library. This is to make it easier to pipe
arguments into functions.
Date Functions
- date FORMAT TIME: Format a date, where a date is an integer type or a time.Time type, and
format is a time.Format formatting string.
- dateModify: Given a date, modify it with a duration: `date_modify "-1.5h" now`. If the duration doesn't
parse, it returns the time unaltered. See `time.ParseDuration` for info on duration strings.
- now: Current time.Time, for feeding into date-related functions.
- htmlDate TIME: Format a date for use in the value field of an HTML "date" form element.
- dateInZone FORMAT TIME TZ: Like date, but takes three arguments: format, timestamp,
timezone.
- htmlDateInZone TIME TZ: Like htmlDate, but takes two arguments: timestamp,
timezone.
String Functions
- abbrev: Truncate a string with ellipses. `abbrev 5 "hello world"` yields "he..."
- abbrevboth: Abbreviate from both sides, yielding "...lo wo..."
- trunc: Truncate a string (no suffix). `trunc 5 "Hello World"` yields "Hello".
- trim: strings.TrimSpace
- trimAll: strings.Trim, but with the argument order reversed `trimAll "$" "$5.00"` or `"$5.00" | trimAll "$"`
- trimSuffix: strings.TrimSuffix, but with the argument order reversed: `trimSuffix "-" "ends-with-"`
- trimPrefix: strings.TrimPrefix, but with the argument order reversed `trimPrefix "$" "$5"`
- upper: strings.ToUpper
- lower: strings.ToLower
- nospace: Remove all space characters from a string. `nospace "h e l l o"` becomes "hello"
- title: strings.Title
- untitle: Remove title casing
- repeat: strings.Repeat, but with the arguments switched: `repeat count str`. (This simplifies common pipelines)
- substr: Given string, start, and length, return a substr.
- initials: Given a multi-word string, return the initials. `initials "Matt Butcher"` returns "MB"
- randAlphaNum: Given a length, generate a random alphanumeric sequence
- randAlpha: Given a length, generate an alphabetic string
- randAscii: Given a length, generate a random ASCII string (symbols included)
- randNumeric: Given a length, generate a string of digits.
- wrap: Force a line wrap at the given width. `wrap 80 "imagine a longer string"`
- wrapWith: Wrap a line at the given length, but using 'sep' instead of a newline. `wrapWith 50, "<br>", $html`
- contains: strings.Contains, but with the arguments switched: `contains substr str`. (This simplifies common pipelines)
- hasPrefix: strings.HasPrefix, but with the arguments switched
- hasSuffix: strings.HasSuffix, but with the arguments switched
- quote: Wrap string(s) in double quotation marks, escape the contents by adding '\' before '"'.
- squote: Wrap string(s) in single quotation marks; does not escape content.
- cat: Concatenate strings, separating them by spaces. `cat $a $b $c`.
- indent: Indent a string using space characters. `indent 4 "foo\nbar"` produces " foo\n bar"
- replace: Replace an old with a new in a string: `$name | replace " " "-"`
- plural: Choose singular or plural based on length: `len $fish | plural "one anchovy" "many anchovies"`
- sha256sum: Generate a hex encoded sha256 hash of the input
- toString: Convert something to a string
String Slice Functions:
- join: strings.Join, but as `join SEP SLICE`
- split: strings.Split, but as `split SEP STRING`. The results are returned
as a map with the indexes set to _N, where N is an integer starting from 0.
Use it like this: `{{$v := "foo/bar/baz" | split "/"}}{{$v._0}}` (Prints `foo`)
- splitList: strings.Split, but as `split SEP STRING`. The results are returned
as an array.
- toStrings: convert a list to a list of strings. 'list 1 2 3 | toStrings' produces '["1" "2" "3"]'
- sortAlpha: sort a list lexicographically.
Integer Slice Functions:
- until: Given an integer, returns a slice of counting integers from 0 to one
less than the given integer: `range $i, $e := until 5`
- untilStep: Given start, stop, and step, return an integer slice starting at
'start', stopping at `stop`, and incrementing by 'step'. This is the same
as Python's long-form of 'range'.
Conversions:
- atoi: Convert a string to an integer. 0 if the integer could not be parsed.
- int64: Convert a string or another numeric type to an int64.
- int: Convert a string or another numeric type to an int.
- float64: Convert a string or another numeric type to a float64.
Defaults:
- default: Give a default value. Used like this: trim " "| default "empty".
Since trim produces an empty string, the default value is returned. For
things with a length (strings, slices, maps), len(0) will trigger the default.
For numbers, the value 0 will trigger the default. For booleans, false will
trigger the default. For structs, the default is never returned (there is
no clear empty condition). For everything else, nil value triggers a default.
- empty: Return true if the given value is the zero value for its type.
Caveats: structs are always non-empty. This should match the behavior of
{{if pipeline}}, but can be used inside of a pipeline.
- coalesce: Given a list of items, return the first non-empty one.
This follows the same rules as 'empty'. '{{ coalesce .someVal 0 "hello" }}`
will return `.someVal` if set, or else return "hello". The 0 is skipped
because it is an empty value.
- compact: Return a copy of a list with all of the empty values removed.
'list 0 1 2 "" | compact' will return '[1 2]'
OS:
- env: Resolve an environment variable
- expandenv: Expand a string through the environment
File Paths:
- base: Return the last element of a path. https://golang.org/pkg/path#Base
- dir: Remove the last element of a path. https://golang.org/pkg/path#Dir
- clean: Clean a path to the shortest equivalent name. (e.g. remove "foo/.."
from "foo/../bar.html") https://golang.org/pkg/path#Clean
- ext: https://golang.org/pkg/path#Ext
- isAbs: https://golang.org/pkg/path#IsAbs
Encoding:
- b64enc: Base 64 encode a string.
- b64dec: Base 64 decode a string.
Reflection:
- typeOf: Takes an interface and returns a string representation of the type.
For pointers, this will return a type prefixed with an asterisk(`*`). So
a pointer to type `Foo` will be `*Foo`.
- typeIs: Compares an interface with a string name, and returns true if they match.
Note that a pointer will not match a reference. For example `*Foo` will not
match `Foo`.
- typeIsLike: Compares an interface with a string name and returns true if
the interface is that `name` or that `*name`. In other words, if the given
value matches the given type or is a pointer to the given type, this returns
true.
- kindOf: Takes an interface and returns a string representation of its kind.
- kindIs: Returns true if the given string matches the kind of the given interface.
Note: None of these can test whether or not something implements a given
interface, since doing so would require compiling the interface in ahead of
time.
Data Structures:
- tuple: Takes an arbitrary list of items and returns a slice of items. Its
tuple-ish properties are mainly gained through the template idiom, and not
through an API provided here. WARNING: The implementation of tuple will
change in the future.
- list: An arbitrary ordered list of items. (This is preferred over tuple.)
- dict: Takes a list of name/values and returns a map[string]interface{}.
The first parameter is converted to a string and stored as a key, the
second parameter is treated as the value. And so on, with odds as keys and
evens as values. If the function call ends with an odd, the last key will
be assigned the empty string. Non-string keys are converted to strings as
follows: []byte are converted, fmt.Stringers will have String() called.
errors will have Error() called. All others will be passed through
fmt.Sprtinf("%v").
Lists Functions:
These are used to manipulate lists: '{{ list 1 2 3 | reverse | first }}'
- first: Get the first item in a 'list'. 'list 1 2 3 | first' prints '1'
- last: Get the last item in a 'list': 'list 1 2 3 | last ' prints '3'
- rest: Get all but the first item in a list: 'list 1 2 3 | rest' returns '[2 3]'
- initial: Get all but the last item in a list: 'list 1 2 3 | initial' returns '[1 2]'
- append: Add an item to the end of a list: 'append $list 4' adds '4' to the end of '$list'
- prepend: Add an item to the beginning of a list: 'prepend $list 4' puts 4 at the beginning of the list.
- reverse: Reverse the items in a list.
- uniq: Remove duplicates from a list.
- without: Return a list with the given values removed: 'without (list 1 2 3) 1' would return '[2 3]'
- has: Return 'true' if the item is found in the list: 'has "foo" $list' will return 'true' if the list contains "foo"
Dict Functions:
These are used to manipulate dicts.
- set: Takes a dict, a key, and a value, and sets that key/value pair in
the dict. `set $dict $key $value`. For convenience, it returns the dict,
even though the dict was modified in place.
- unset: Takes a dict and a key, and deletes that key/value pair from the
dict. `unset $dict $key`. This returns the dict for convenience.
- hasKey: Takes a dict and a key, and returns boolean true if the key is in
the dict.
- pluck: Given a key and one or more maps, get all of the values for that key.
- keys: Get an array of all of the keys in a dict.
- pick: Select just the given keys out of the dict, and return a new dict.
- omit: Return a dict without the given keys.
Math Functions:
Integer functions will convert integers of any width to `int64`. If a
string is passed in, functions will attempt to convert with
`strconv.ParseInt(s, 10, 64)`. If this fails, the value will be treated as 0.
- add1: Increment an integer by 1
- add: Sum an arbitrary number of integers
- sub: Subtract the second integer from the first
- div: Divide the first integer by the second
- mod: Modulo of the first integer divided by the second
- mul: Multiply integers
- max: Return the biggest of a series of one or more integers
- min: Return the smallest of a series of one or more integers
- biggest: DEPRECATED. Return the biggest of a series of one or more integers
Crypto Functions:
- genPrivateKey: Generate a private key for the given cryptosystem. If no
argument is supplied, by default it will generate a private key using
the RSA algorithm. Accepted values are `rsa`, `dsa`, and `ecdsa`.
- derivePassword: Derive a password from the given parameters according to the ["Master Password" algorithm](http://masterpasswordapp.com/algorithm.html)
Given parameters (in order) are:
`counter` (starting with 1), `password_type` (maximum, long, medium, short, basic, or pin), `password`,
`user`, and `site`
SemVer Functions:
These functions provide version parsing and comparisons for SemVer 2 version
strings.
- semver: Parse a semantic version and return a Version object.
- semverCompare: Compare a SemVer range to a particular version.
*/
package sprig

250
vendor/github.com/Masterminds/sprig/functions.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
package sprig
import (
"html/template"
"os"
"path"
"strconv"
"strings"
ttemplate "text/template"
"time"
util "github.com/aokoli/goutils"
)
// Produce the function map.
//
// Use this to pass the functions into the template engine:
//
// tpl := template.New("foo").Funcs(sprig.FuncMap()))
//
func FuncMap() template.FuncMap {
return HtmlFuncMap()
}
// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
func HermeticTxtFuncMap() ttemplate.FuncMap {
r := TxtFuncMap()
for _, name := range nonhermeticFunctions {
delete(r, name)
}
return r
}
// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
func HermeticHtmlFuncMap() template.FuncMap {
r := HtmlFuncMap()
for _, name := range nonhermeticFunctions {
delete(r, name)
}
return r
}
// TxtFuncMap returns a 'text/template'.FuncMap
func TxtFuncMap() ttemplate.FuncMap {
return ttemplate.FuncMap(GenericFuncMap())
}
// HtmlFuncMap returns an 'html/template'.Funcmap
func HtmlFuncMap() template.FuncMap {
return template.FuncMap(GenericFuncMap())
}
// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
func GenericFuncMap() map[string]interface{} {
gfm := make(map[string]interface{}, len(genericMap))
for k, v := range genericMap {
gfm[k] = v
}
return gfm
}
// These functions are not guaranteed to evaluate to the same result for a given input, because they
// refer to the environment or global state.
var nonhermeticFunctions = []string{
// Date functions
"date",
"date_in_zone",
"date_modify",
"now",
"htmlDate",
"htmlDateInZone",
"dateInZone",
"dateModify",
// Strings
"randAlphaNum",
"randAlpha",
"randAscii",
"randNumeric",
"uuidv4",
// OS
"env",
"expandenv",
}
var genericMap = map[string]interface{}{
"hello": func() string { return "Hello!" },
// Date functions
"date": date,
"date_in_zone": dateInZone,
"date_modify": dateModify,
"now": func() time.Time { return time.Now() },
"htmlDate": htmlDate,
"htmlDateInZone": htmlDateInZone,
"dateInZone": dateInZone,
"dateModify": dateModify,
// Strings
"abbrev": abbrev,
"abbrevboth": abbrevboth,
"trunc": trunc,
"trim": strings.TrimSpace,
"upper": strings.ToUpper,
"lower": strings.ToLower,
"title": strings.Title,
"untitle": untitle,
"substr": substring,
// Switch order so that "foo" | repeat 5
"repeat": func(count int, str string) string { return strings.Repeat(str, count) },
// Deprecated: Use trimAll.
"trimall": func(a, b string) string { return strings.Trim(b, a) },
// Switch order so that "$foo" | trimall "$"
"trimAll": func(a, b string) string { return strings.Trim(b, a) },
"trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
"trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
"nospace": util.DeleteWhiteSpace,
"initials": initials,
"randAlphaNum": randAlphaNumeric,
"randAlpha": randAlpha,
"randAscii": randAscii,
"randNumeric": randNumeric,
"swapcase": util.SwapCase,
"wrap": func(l int, s string) string { return util.Wrap(s, l) },
"wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
// Switch order so that "foobar" | contains "foo"
"contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
"hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
"hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
"quote": quote,
"squote": squote,
"cat": cat,
"indent": indent,
"replace": replace,
"plural": plural,
"sha256sum": sha256sum,
"toString": strval,
// Wrap Atoi to stop errors.
"atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
"int64": toInt64,
"int": toInt,
"float64": toFloat64,
//"gt": func(a, b int) bool {return a > b},
//"gte": func(a, b int) bool {return a >= b},
//"lt": func(a, b int) bool {return a < b},
//"lte": func(a, b int) bool {return a <= b},
// split "/" foo/bar returns map[int]string{0: foo, 1: bar}
"split": split,
"splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
"toStrings": strslice,
"until": until,
"untilStep": untilStep,
// VERY basic arithmetic.
"add1": func(i interface{}) int64 { return toInt64(i) + 1 },
"add": func(i ...interface{}) int64 {
var a int64 = 0
for _, b := range i {
a += toInt64(b)
}
return a
},
"sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
"div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
"mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
"mul": func(a interface{}, v ...interface{}) int64 {
val := toInt64(a)
for _, b := range v {
val = val * toInt64(b)
}
return val
},
"biggest": max,
"max": max,
"min": min,
// string slices. Note that we reverse the order b/c that's better
// for template processing.
"join": join,
"sortAlpha": sortAlpha,
// Defaults
"default": dfault,
"empty": empty,
"coalesce": coalesce,
"compact": compact,
// Reflection
"typeOf": typeOf,
"typeIs": typeIs,
"typeIsLike": typeIsLike,
"kindOf": kindOf,
"kindIs": kindIs,
// OS:
"env": func(s string) string { return os.Getenv(s) },
"expandenv": func(s string) string { return os.ExpandEnv(s) },
// File Paths:
"base": path.Base,
"dir": path.Dir,
"clean": path.Clean,
"ext": path.Ext,
"isAbs": path.IsAbs,
// Encoding:
"b64enc": base64encode,
"b64dec": base64decode,
"b32enc": base32encode,
"b32dec": base32decode,
// Data Structures:
"tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
"list": list,
"dict": dict,
"set": set,
"unset": unset,
"hasKey": hasKey,
"pluck": pluck,
"keys": keys,
"pick": pick,
"omit": omit,
"append": push, "push": push,
"prepend": prepend,
"first": first,
"rest": rest,
"last": last,
"initial": initial,
"reverse": reverse,
"uniq": uniq,
"without": without,
"has": func(needle interface{}, haystack []interface{}) bool { return inList(haystack, needle) },
// Crypto:
"genPrivateKey": generatePrivateKey,
"derivePassword": derivePassword,
// UUIDs:
"uuidv4": uuidv4,
// SemVer:
"semver": semver,
"semverCompare": semverCompare,
}
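
A minimal, hypothetical program (assuming the vendored `github.com/Masterminds/sprig` import; not part of this repository) showing the difference between the full and hermetic function maps defined above:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/sprig"
)

func main() {
	full := sprig.TxtFuncMap()
	hermetic := sprig.HermeticTxtFuncMap()

	// "now" depends on the environment, so it is stripped from the hermetic map.
	_, inFull := full["now"]
	_, inHermetic := hermetic["now"]
	fmt.Println(inFull, inHermetic) // true false
}
```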

109
vendor/github.com/Masterminds/sprig/list.go generated vendored Normal file
View File

@ -0,0 +1,109 @@
package sprig
import (
"reflect"
"sort"
)
func list(v ...interface{}) []interface{} {
return v
}
func push(list []interface{}, v interface{}) []interface{} {
return append(list, v)
}
func prepend(list []interface{}, v interface{}) []interface{} {
return append([]interface{}{v}, list...)
}
func last(list []interface{}) interface{} {
l := len(list)
if l == 0 {
return nil
}
return list[l-1]
}
func first(list []interface{}) interface{} {
if len(list) == 0 {
return nil
}
return list[0]
}
func rest(list []interface{}) []interface{} {
if len(list) == 0 {
return list
}
return list[1:]
}
func initial(list []interface{}) []interface{} {
l := len(list)
if l == 0 {
return list
}
return list[:l-1]
}
func sortAlpha(list interface{}) []string {
k := reflect.Indirect(reflect.ValueOf(list)).Kind()
switch k {
case reflect.Slice, reflect.Array:
a := strslice(list)
s := sort.StringSlice(a)
s.Sort()
return s
}
return []string{strval(list)}
}
func reverse(v []interface{}) []interface{} {
// We do not sort in place because the incoming array should not be altered.
l := len(v)
c := make([]interface{}, l)
for i := 0; i < l; i++ {
c[l-i-1] = v[i]
}
return c
}
func compact(list []interface{}) []interface{} {
res := []interface{}{}
for _, item := range list {
if !empty(item) {
res = append(res, item)
}
}
return res
}
func uniq(list []interface{}) []interface{} {
dest := []interface{}{}
for _, item := range list {
if !inList(dest, item) {
dest = append(dest, item)
}
}
return dest
}
func inList(haystack []interface{}, needle interface{}) bool {
for _, h := range haystack {
if reflect.DeepEqual(needle, h) {
return true
}
}
return false
}
func without(list []interface{}, omit ...interface{}) []interface{} {
res := []interface{}{}
for _, i := range list {
if !inList(omit, i) {
res = append(res, i)
}
}
return res
}

129
vendor/github.com/Masterminds/sprig/numeric.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
package sprig
import (
"math"
"reflect"
"strconv"
)
// toFloat64 converts numeric types, numeric strings, and bools to float64, returning 0 when conversion fails.
func toFloat64(v interface{}) float64 {
if str, ok := v.(string); ok {
iv, err := strconv.ParseFloat(str, 64)
if err != nil {
return 0
}
return iv
}
val := reflect.Indirect(reflect.ValueOf(v))
switch val.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return float64(val.Int())
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
return float64(val.Uint())
case reflect.Uint, reflect.Uint64:
return float64(val.Uint())
case reflect.Float32, reflect.Float64:
return val.Float()
case reflect.Bool:
if val.Bool() == true {
return 1
}
return 0
default:
return 0
}
}
func toInt(v interface{}) int {
	// It's not optimal, but I don't want to duplicate the toInt64 code.
return int(toInt64(v))
}
// toInt64 converts integer types to 64-bit integers
func toInt64(v interface{}) int64 {
if str, ok := v.(string); ok {
iv, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return 0
}
return iv
}
val := reflect.Indirect(reflect.ValueOf(v))
switch val.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return val.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
return int64(val.Uint())
case reflect.Uint, reflect.Uint64:
tv := val.Uint()
if tv <= math.MaxInt64 {
return int64(tv)
}
// TODO: What is the sensible thing to do here?
return math.MaxInt64
case reflect.Float32, reflect.Float64:
return int64(val.Float())
case reflect.Bool:
if val.Bool() == true {
return 1
}
return 0
default:
return 0
}
}
func max(a interface{}, i ...interface{}) int64 {
aa := toInt64(a)
for _, b := range i {
bb := toInt64(b)
if bb > aa {
aa = bb
}
}
return aa
}
func min(a interface{}, i ...interface{}) int64 {
aa := toInt64(a)
for _, b := range i {
bb := toInt64(b)
if bb < aa {
aa = bb
}
}
return aa
}
func until(count int) []int {
step := 1
if count < 0 {
step = -1
}
return untilStep(0, count, step)
}
func untilStep(start, stop, step int) []int {
v := []int{}
if stop < start {
if step >= 0 {
return v
}
for i := start; i > stop; i += step {
v = append(v, i)
}
return v
}
if step <= 0 {
return v
}
for i := start; i < stop; i += step {
v = append(v, i)
}
return v
}

28
vendor/github.com/Masterminds/sprig/reflect.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
package sprig
import (
"fmt"
"reflect"
)
// typeIs returns true if the src is the type named in target.
func typeIs(target string, src interface{}) bool {
return target == typeOf(src)
}
func typeIsLike(target string, src interface{}) bool {
t := typeOf(src)
return target == t || "*"+target == t
}
func typeOf(src interface{}) string {
return fmt.Sprintf("%T", src)
}
func kindIs(target string, src interface{}) bool {
return target == kindOf(src)
}
func kindOf(src interface{}) string {
return reflect.ValueOf(src).Kind().String()
}

23
vendor/github.com/Masterminds/sprig/semver.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package sprig
import (
sv2 "github.com/Masterminds/semver"
)
func semverCompare(constraint, version string) (bool, error) {
c, err := sv2.NewConstraint(constraint)
if err != nil {
return false, err
}
v, err := sv2.NewVersion(version)
if err != nil {
return false, err
}
return c.Check(v), nil
}
func semver(version string) (*sv2.Version, error) {
return sv2.NewVersion(version)
}

197
vendor/github.com/Masterminds/sprig/strings.go generated vendored Normal file
View File

@ -0,0 +1,197 @@
package sprig
import (
"encoding/base32"
"encoding/base64"
"fmt"
"reflect"
"strconv"
"strings"
util "github.com/aokoli/goutils"
)
func base64encode(v string) string {
return base64.StdEncoding.EncodeToString([]byte(v))
}
func base64decode(v string) string {
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return err.Error()
}
return string(data)
}
func base32encode(v string) string {
return base32.StdEncoding.EncodeToString([]byte(v))
}
func base32decode(v string) string {
data, err := base32.StdEncoding.DecodeString(v)
if err != nil {
return err.Error()
}
return string(data)
}
func abbrev(width int, s string) string {
if width < 4 {
return s
}
r, _ := util.Abbreviate(s, width)
return r
}
func abbrevboth(left, right int, s string) string {
if right < 4 || left > 0 && right < 7 {
return s
}
r, _ := util.AbbreviateFull(s, left, right)
return r
}
func initials(s string) string {
// Wrap this just to eliminate the var args, which templates don't do well.
return util.Initials(s)
}
func randAlphaNumeric(count int) string {
// It is not possible, it appears, to actually generate an error here.
r, _ := util.RandomAlphaNumeric(count)
return r
}
func randAlpha(count int) string {
r, _ := util.RandomAlphabetic(count)
return r
}
func randAscii(count int) string {
r, _ := util.RandomAscii(count)
return r
}
func randNumeric(count int) string {
r, _ := util.RandomNumeric(count)
return r
}
func untitle(str string) string {
return util.Uncapitalize(str)
}
func quote(str ...interface{}) string {
out := make([]string, len(str))
for i, s := range str {
out[i] = fmt.Sprintf("%q", strval(s))
}
return strings.Join(out, " ")
}
func squote(str ...interface{}) string {
out := make([]string, len(str))
for i, s := range str {
out[i] = fmt.Sprintf("'%v'", s)
}
return strings.Join(out, " ")
}
func cat(v ...interface{}) string {
r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
return fmt.Sprintf(r, v...)
}
func indent(spaces int, v string) string {
pad := strings.Repeat(" ", spaces)
return pad + strings.Replace(v, "\n", "\n"+pad, -1)
}
func replace(old, new, src string) string {
return strings.Replace(src, old, new, -1)
}
func plural(one, many string, count int) string {
if count == 1 {
return one
}
return many
}
func strslice(v interface{}) []string {
switch v := v.(type) {
case []string:
return v
case []interface{}:
l := len(v)
b := make([]string, l)
for i := 0; i < l; i++ {
b[i] = strval(v[i])
}
return b
default:
val := reflect.ValueOf(v)
switch val.Kind() {
case reflect.Array, reflect.Slice:
l := val.Len()
b := make([]string, l)
for i := 0; i < l; i++ {
b[i] = strval(val.Index(i).Interface())
}
return b
default:
return []string{strval(v)}
}
}
}
func strval(v interface{}) string {
switch v := v.(type) {
case string:
return v
case []byte:
return string(v)
case error:
return v.Error()
case fmt.Stringer:
return v.String()
default:
return fmt.Sprintf("%v", v)
}
}
func trunc(c int, s string) string {
if len(s) <= c {
return s
}
return s[0:c]
}
func join(sep string, v interface{}) string {
return strings.Join(strslice(v), sep)
}
func split(sep, orig string) map[string]string {
parts := strings.Split(orig, sep)
res := make(map[string]string, len(parts))
for i, v := range parts {
res["_"+strconv.Itoa(i)] = v
}
return res
}
// substring creates a substring of the given string.
//
// If start is < 0, this calls string[:length].
//
// If start is >= 0 and length < 0, this calls string[start:]
//
// Otherwise, this calls string[start:length].
func substring(start, length int, s string) string {
if start < 0 {
return s[:length]
}
if length < 0 {
return s[start:]
}
return s[start:length]
}
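As a quick sketch (not part of the vendored file) of the cases documented on `substring` above; since the helpers are unexported, this would have to live in a test inside the same package, and the test name is assumed:

```go
package sprig

import "testing"

// TestSubstringSketch is an illustrative test for the documented substring cases.
func TestSubstringSketch(t *testing.T) {
	if got := substring(0, 5, "hello world"); got != "hello" { // s[start:length]
		t.Errorf("got %q", got)
	}
	if got := substring(-1, 5, "hello world"); got != "hello" { // start < 0 → s[:length]
		t.Errorf("got %q", got)
	}
	if got := substring(6, -1, "hello world"); got != "world" { // length < 0 → s[start:]
		t.Errorf("got %q", got)
	}
}
```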

View File

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2014 Mitchell Hashimoto
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

479
vendor/github.com/Sirupsen/logrus/README.md generated vendored Normal file
View File

@ -0,0 +1,479 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
**Seeing weird case-sensitive problems?** See [this
issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
This change has been reverted. I apologize for causing this. I greatly
underestimated the impact this would have. Logrus strives for stability and
backwards compatibility and failed to provide that.
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus using any of the `printf`-family functions should be
seen as a hint that you should add a field; however, you can still use the
`printf`-family functions with Logrus.
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") # will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/Sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
Note: the Syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Firehose](https://github.com/beaubrewer/firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
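For example, a call with a single user field ends up with all three automatic fields in the output; the timestamp shown here is illustrative:

```go
log.WithFields(log.Fields{"event": "send"}).Info("Failed to send event.")
// With the default TextFormatter this renders roughly as:
// time="2015-03-26T01:27:38-04:00" level=info msg="Failed to send event." event=send
```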
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
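The built-in formatters above are plain structs, so configuring them is just a matter of setting fields before installing the formatter. A minimal sketch, using field names from the formatter documentation (the timestamp layout is only an example):

```go
// Full timestamps in text output:
log.SetFormatter(&log.TextFormatter{FullTimestamp: true})

// Or JSON with a custom timestamp layout:
log.SetFormatter(&log.JSONFormatter{TimestampFormat: "2006-01-02 15:04:05"})
```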
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, so the logger is generated with a different config in each environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
logger, hook := NewNullLogger()
logger.Error("Hello error")
assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
hook.Reset()
assert.Nil(hook.LastEntry())
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```
#### Thread safety
By default, the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
Situations where locking is not needed include:
* You have no hooks registered, or the hook calls are already thread-safe.
* Writing to logger.Out is already thread-safe, for example:
  1) logger.Out is protected by locks.
  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
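As a sketch of the second case, with an append-only file target; the file path, permissions and message sizes here are purely illustrative:

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Illustrative path; on Linux, O_APPEND writes under 4k are atomic.
	f, err := os.OpenFile("/var/log/myapp.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err == nil {
		logger.Out = f
		logger.SetNoLock() // safe here: no hooks registered and Out is append-only
	}

	logger.Info("logging without the internal mutex")
}
```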

64
vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}

26
vendor/github.com/Sirupsen/logrus/doc.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus

275
vendor/github.com/Sirupsen/logrus/entry.go generated vendored Normal file
View File

@ -0,0 +1,275 @@
package logrus
import (
"bytes"
"fmt"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When the formatter is called in entry.log(), a Buffer may be set on the entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
str := string(serialized)
return str, nil
}
// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

193
vendor/github.com/Sirupsen/logrus/exported.go generated vendored Normal file
View File

@ -0,0 +1,193 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

45
vendor/github.com/Sirupsen/logrus/formatter.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}

34
vendor/github.com/Sirupsen/logrus/hooks.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers; you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}
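Purely as an illustration of the small interface above (not part of the vendored file), a custom hook only needs `Levels` and `Fire`; the counter and the choice of levels in this sketch are arbitrary:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// countingHook counts warning-and-above entries; illustrative only.
type countingHook struct{ seen int }

func (h *countingHook) Levels() []log.Level {
	return []log.Level{log.WarnLevel, log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func (h *countingHook) Fire(entry *log.Entry) error {
	h.seen++
	return nil
}

func main() {
	log.AddHook(&countingHook{})
	log.Warn("something not great happened")
}
```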

74
vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package logrus
import (
"encoding/json"
"fmt"
)
type fieldKey string
type FieldMap map[fieldKey]string
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for various fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// },
// }
FieldMap FieldMap
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

308
vendor/github.com/Sirupsen/logrus/logger.go generated vendored Normal file
View File

@ -0,0 +1,308 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it as the default, which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks LevelHooks
// All log entries pass through the formatter before being logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in debug or verbose environments.
Level Level
// Used to sync writing to the log. Locking is enabled by default.
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as a single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When a file is opened in append mode, it's safe to
// write to it concurrently (for messages under 4k on Linux).
// In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}

143
vendor/github.com/Sirupsen/logrus/logrus.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}

View File

@ -0,0 +1,10 @@
// +build appengine
package logrus
import "io"
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
return true
}

10
vendor/github.com/Sirupsen/logrus/terminal_bsd.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

vendor/github.com/Sirupsen/logrus/terminal_linux.go generated vendored Normal file

@ -0,0 +1,14 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios


@ -0,0 +1,28 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
var termios Termios
switch v := f.(type) {
case *os.File:
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
default:
return false
}
}

vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored Normal file

@ -0,0 +1,21 @@
// +build solaris,!appengine
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}

vendor/github.com/Sirupsen/logrus/terminal_windows.go generated vendored Normal file

@ -0,0 +1,33 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows,!appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}

vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored Normal file

@ -0,0 +1,189 @@
package logrus
import (
"bytes"
"fmt"
"sort"
"strings"
"sync"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
)
func init() {
baseTimestamp = time.Now()
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// QuoteCharacter can be set to override the default quoting character "
// with something else. For example: ', or `.
QuoteCharacter string
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if len(f.QuoteCharacter) == 0 {
f.QuoteCharacter = "\""
}
if entry.Logger != nil {
f.isTerminal = IsTerminal(entry.Logger.Out)
}
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !f.needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
errmsg := value.Error()
if !f.needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
fmt.Fprint(b, value)
}
}
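
To show how the `TextFormatter` options above are typically wired up, here is a short, hedged sketch. It assumes the usual `logrus.New()` constructor and the exported `Out` and `Formatter` fields on `Logger`, which are defined elsewhere in the package and not shown in this diff.

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.Out = os.Stdout

	// Force full timestamps and quote empty field values with single quotes.
	logger.Formatter = &log.TextFormatter{
		FullTimestamp:    true,
		QuoteEmptyFields: true,
		QuoteCharacter:   "'",
	}

	logger.WithField("vm", "hyperkit-0").Info("instance started")
}
```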

vendor/github.com/Sirupsen/logrus/writer.go generated vendored Normal file

@ -0,0 +1,62 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:
printFunc = entry.Info
case WarnLevel:
printFunc = entry.Warn
case ErrorLevel:
printFunc = entry.Error
case FatalLevel:
printFunc = entry.Fatal
case PanicLevel:
printFunc = entry.Panic
default:
printFunc = entry.Print
}
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
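
A common use of `WriterLevel` above is to route the standard library's `log` output through logrus. The sketch below assumes the usual `logrus.New()` constructor and is only illustrative.

```go
package main

import (
	stdlog "log"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Everything written to w is scanned line by line and re-emitted
	// through entry.Warn by the writerScanner goroutine above.
	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close()

	stdlog.SetOutput(w)
	stdlog.Println("this line becomes a logrus warning")
}
```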

vendor/github.com/aokoli/goutils/LICENSE.txt generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/aokoli/goutils/README.md generated vendored Normal file

@ -0,0 +1,72 @@
GoUtils
===========
GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some
string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes:
* WordUtils
* RandomStringUtils
* StringUtils (partial implementation)
## Installation
If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
go get github.com/aokoli/goutils
If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
## Documentation
GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/aokoli/goutils?status.png)](https://godoc.org/github.com/aokoli/goutils)
## Usage
The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
package main
import (
"fmt"
"github.com/aokoli/goutils"
)
func main() {
// EXAMPLE 1: A goutils function which returns no errors
fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
}
Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
package main
import (
"fmt"
"github.com/aokoli/goutils"
)
func main() {
// EXAMPLE 2: A goutils function which returns an error
rand1, err1 := goutils.Random (-1, 0, 0, true, true)
if err1 != nil {
fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
} else {
fmt.Println(rand1)
}
}
## License
GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
## Issue Reporting
Make suggestions or report issues using the Git issue tracker: https://github.com/aokoli/goutils/issues
## Website
* [GoUtils webpage](http://aokoli.github.io/goutils/)
## Mailing List
Contact [okolialex@gmail.com](mailto:okolialex@mail.com) to be added to the mailing list. You will get updates on the
status of the project and the potential direction it will be heading.

vendor/github.com/aokoli/goutils/randomstringutils.go generated vendored Normal file

@ -0,0 +1,259 @@
/*
Copyright 2014 Alexander Okoli
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goutils
import (
"fmt"
"unicode"
"math"
"math/rand"
"time"
)
// Provides the time-based seed used to generate random #s
var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
/*
RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
Parameter:
count - the length of random string to create
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomNonAlphaNumeric (count int) (string, error) {
return RandomAlphaNumericCustom(count, false, false)
}
/*
RandomAscii creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
Parameter:
count - the length of random string to create
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomAscii(count int) (string, error) {
return Random(count, 32, 127, false, false)
}
/*
RandomNumeric creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of numeric characters.
Parameter:
count - the length of random string to create
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomNumeric (count int) (string, error) {
return Random(count, 0, 0, false, true)
}
/*
RandomAlphabetic creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
Parameters:
count - the length of random string to create
letters - if true, generated string may include alphabetic characters
numbers - if true, generated string may include numeric characters
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomAlphabetic (count int) (string, error) {
return Random(count, 0, 0, true, false)
}
/*
RandomAlphaNumeric creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of alpha-numeric characters.
Parameter:
count - the length of random string to create
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomAlphaNumeric (count int) (string, error) {
return Random(count, 0, 0, true, true)
}
/*
RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
Parameters:
count - the length of random string to create
letters - if true, generated string may include alphabetic characters
numbers - if true, generated string may include numeric characters
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func RandomAlphaNumericCustom (count int, letters bool, numbers bool) (string, error) {
return Random(count, 0, 0, letters, numbers)
}
/*
Random creates a random string based on a variety of options, using default source of randomness.
This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but
instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
Parameters:
count - the length of random string to create
start - the position in set of chars (ASCII/Unicode int) to start at
end - the position in set of chars (ASCII/Unicode int) to end before
letters - if true, generated string may include alphabetic characters
numbers - if true, generated string may include numeric characters
chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
Returns:
string - the random string
error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
*/
func Random (count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
return RandomSeed (count, start, end, letters, numbers, chars, RANDOM)
}
/*
RandomSeed creates a random string based on a variety of options, using supplied source of randomness.
If the parameters start and end are both 0, start and end are set to ' ' and 'z' (the ASCII printable characters),
unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively.
If chars is not nil, characters stored in chars that are between start and end are chosen.
This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
Parameters:
count - the length of random string to create
start - the position in set of chars (ASCII/Unicode decimals) to start at
end - the position in set of chars (ASCII/Unicode decimals) to end before
letters - if true, generated string may include alphabetic characters
numbers - if true, generated string may include numeric characters
chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
random - a source of randomness.
Returns:
string - the random string
error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
*/
func RandomSeed (count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) {
if count == 0 {
return "", nil
} else if count < 0 {
err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
return "", err
}
if chars != nil && len(chars) == 0 {
err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
return "", err
}
if start == 0 && end == 0 {
if chars != nil {
end = len(chars)
} else {
if !letters && !numbers {
end = math.MaxInt32
} else {
end = 'z' + 1
start = ' '
}
}
} else {
if end <= start {
err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
return "", err
}
if chars != nil && end > len(chars) {
err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
return "", err
}
}
buffer := make([]rune, count)
gap := end - start
// high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
// low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
for count != 0 {
count--
var ch rune
if chars == nil {
ch = rune(random.Intn(gap) + start)
} else {
ch = chars[random.Intn(gap) + start]
}
if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
if ch >= 56320 && ch <= 57343 { // low surrogate range
if count == 0 {
count++
} else {
// Insert low surrogate
buffer[count] = ch
count--
// Insert high surrogate
buffer[count] = rune(55296 + random.Intn(128))
}
} else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
if count == 0 {
count++
} else {
// Insert low surrogate
buffer[count] = rune(56320 + random.Intn(128))
count--
// Insert high surrogate
buffer[count] = ch
}
} else if ch >= 56192 && ch <= 56319 {
// private high surrogate, skip it
count++
} else {
// not one of the surrogates*
buffer[count] = ch
}
} else {
count++
}
}
return string(buffer), nil
}
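
Because `RandomSeed` accepts a caller-supplied `*rand.Rand`, a fixed seed makes the output reproducible. The following is a minimal sketch of that usage; the seed value and length are arbitrary.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/aokoli/goutils"
)

func main() {
	// A fixed seed yields the same 16-character alphanumeric string on every run.
	r := rand.New(rand.NewSource(42))
	s, err := goutils.RandomSeed(16, 0, 0, true, true, nil, r)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(s)
}
```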

vendor/github.com/aokoli/goutils/stringutils.go generated vendored Normal file

@ -0,0 +1,232 @@
/*
Copyright 2014 Alexander Okoli
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goutils
import (
"fmt"
"unicode"
"bytes"
"strings"
)
// Typically returned by functions where a searched item cannot be found
const INDEX_NOT_FOUND = -1
/*
Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..."
Specifically, the algorithm is as follows:
- If str is less than maxWidth characters long, return it.
- Else abbreviate it to (str[0:maxWidth - 3] + "...").
- If maxWidth is less than 4, return an illegal argument error.
- In no case will it return a string of length greater than maxWidth.
Parameters:
str - the string to check
maxWidth - maximum length of result string, must be at least 4
Returns:
string - abbreviated string
error - if the width is too small
*/
func Abbreviate (str string, maxWidth int) (string, error) {
return AbbreviateFull(str, 0, maxWidth)
}
/*
AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..."
This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not
necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear
somewhere in the result.
In no case will it return a string of length greater than maxWidth.
Parameters:
str - the string to check
offset - left edge of source string
maxWidth - maximum length of result string, must be at least 4
Returns:
string - abbreviated string
error - if the width is too small
*/
func AbbreviateFull (str string, offset int, maxWidth int) (string, error) {
if str == "" {
return "", nil
}
if maxWidth < 4 {
err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4")
return "", err
}
if len(str) <= maxWidth {
return str, nil
}
if offset > len(str) {
offset = len(str)
}
if len(str) - offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7
offset = len(str) - (maxWidth - 3)
}
abrevMarker := "..."
if offset <= 4 {
return str[0:maxWidth - 3] + abrevMarker, nil// str.substring(0, maxWidth - 3) + abrevMarker;
}
if maxWidth < 7 {
err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7")
return "", err
}
if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15
abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3))
return abrevMarker + abrevStr, nil// abrevMarker + abbreviate(str.substring(offset), maxWidth - 3);
}
return abrevMarker + str[(len(str) - (maxWidth - 3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3));
}
/*
DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune).
It returns the string without whitespaces.
Parameter:
str - the string to delete whitespace from, may be nil
Returns:
the string without whitespaces
*/
func DeleteWhiteSpace(str string) string {
if str == "" {
return str
}
sz := len(str)
var chs bytes.Buffer
count := 0
for i := 0; i < sz; i++ {
ch := rune(str[i])
if !unicode.IsSpace(ch) {
chs.WriteRune(ch)
count++
}
}
if count == sz {
return str
}
return chs.String()
}
/*
IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
Parameters:
str1 - the first string
str2 - the second string
Returns:
the index where str1 and str2 begin to differ; -1 if they are equal
*/
func IndexOfDifference(str1 string, str2 string) int {
if str1 == str2 {
return INDEX_NOT_FOUND
}
if IsEmpty(str1) || IsEmpty(str2) {
return 0
}
var i int;
for i = 0; i < len(str1) && i < len(str2); i++ {
if rune(str1[i]) != rune(str2[i]) {
break
}
}
if i < len(str2) || i < len(str1) {
return i
}
return INDEX_NOT_FOUND
}
/*
IsBlank checks if a string is whitespace or empty (""). Observe the following behavior:
goutils.IsBlank("") = true
goutils.IsBlank(" ") = true
goutils.IsBlank("bob") = false
goutils.IsBlank(" bob ") = false
Parameter:
str - the string to check
Returns:
true - if the string is whitespace or empty ("")
*/
func IsBlank(str string) bool {
strLen := len(str)
if str == "" || strLen == 0 {
return true
}
for i := 0; i < strLen; i++ {
if unicode.IsSpace(rune(str[i])) == false {
return false
}
}
return true
}
/*
IndexOf returns the index of the first instance of sub in str, with the search beginning from the
index start point specified. -1 is returned if sub is not present in str.
An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero.
A start position greater than the string length returns -1.
Parameters:
str - the string to check
sub - the substring to find
start - the start position; negative treated as zero
Returns:
the first index where the sub string was found (always >= start)
*/
func IndexOf(str string, sub string, start int) int {
if (start < 0) {
start = 0
}
if len(str) < start {
return INDEX_NOT_FOUND
}
if IsEmpty(str) || IsEmpty(sub) {
return INDEX_NOT_FOUND
}
partialIndex := strings.Index(str[start:len(str)], sub)
if partialIndex == -1 {
return INDEX_NOT_FOUND
}
return partialIndex + start
}
// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise.
func IsEmpty(str string) bool {
return len(str) == 0
}
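
The string helpers above are easiest to understand from a couple of calls. The sketch below exercises `Abbreviate` and `IndexOfDifference` with made-up inputs; the expected outputs in the comments follow from the algorithm described in the doc comments.

```go
package main

import (
	"fmt"

	"github.com/aokoli/goutils"
)

func main() {
	// With maxWidth 20 the head of the string is kept and "..." appended.
	s, err := goutils.Abbreviate("Now is the time for all good men", 20)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(s) // "Now is the time f..."

	// Index at which the two strings first differ.
	fmt.Println(goutils.IndexOfDifference("hyperkit-01", "hyperkit-02")) // 10
}
```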

vendor/github.com/aokoli/goutils/wordutils.go generated vendored Normal file

@ -0,0 +1,365 @@
/*
Copyright 2014 Alexander Okoli
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package goutils provides utility functions to manipulate strings in various ways.
The code snippets below show examples of how to use goutils. Some functions return
errors while others do not, so usage would vary as a result.
Example:
package main
import (
"fmt"
"github.com/aokoli/goutils"
)
func main() {
// EXAMPLE 1: A goutils function which returns no errors
fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
// EXAMPLE 2: A goutils function which returns an error
rand1, err1 := goutils.Random (-1, 0, 0, true, true)
if err1 != nil {
fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
} else {
fmt.Println(rand1)
}
}
*/
package goutils
import (
"bytes"
"strings"
"unicode"
)
// VERSION indicates the current version of goutils
const VERSION = "1.0.0"
/*
Wrap wraps a single line of text, identifying words by ' '.
New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped.
Leading spaces on a new line are stripped. Trailing spaces are not stripped.
Parameters:
str - the string to be word wrapped
wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
Returns:
a line with newlines inserted
*/
func Wrap (str string, wrapLength int) string {
return WrapCustom (str, wrapLength, "", false)
}
/*
WrapCustom wraps a single line of text, identifying words by ' '.
Leading spaces on a new line are stripped. Trailing spaces are not stripped.
Parameters:
str - the string to be word wrapped
wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
newLineStr - the string to insert for a new line, "" uses '\n'
wrapLongWords - true if long words (such as URLs) should be wrapped
Returns:
a line with newlines inserted
*/
func WrapCustom (str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
if str == "" {
return ""
}
if newLineStr == "" {
newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
}
if wrapLength < 1 {
wrapLength = 1
}
inputLineLength := len(str)
offset := 0
var wrappedLine bytes.Buffer
for inputLineLength-offset > wrapLength {
if rune(str[offset]) == ' ' {
offset++
continue
}
end := wrapLength + offset + 1
spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
if spaceToWrapAt >= offset {
// normal word (not longer than wrapLength)
wrappedLine.WriteString(str[offset:spaceToWrapAt])
wrappedLine.WriteString(newLineStr)
offset = spaceToWrapAt + 1
} else {
// long word or URL
if wrapLongWords {
end := wrapLength + offset
// long words are wrapped one line at a time
wrappedLine.WriteString(str[offset:end])
wrappedLine.WriteString(newLineStr)
offset += wrapLength
} else {
// long words aren't wrapped, just extended beyond limit
end := wrapLength + offset
spaceToWrapAt = strings.IndexRune(str[end:len(str)], ' ') + end
if spaceToWrapAt >= 0 {
wrappedLine.WriteString(str[offset:spaceToWrapAt])
wrappedLine.WriteString(newLineStr)
offset = spaceToWrapAt + 1
} else {
wrappedLine.WriteString(str[offset:len(str)])
offset = inputLineLength
}
}
}
}
wrappedLine.WriteString(str[offset:len(str)])
return wrappedLine.String()
}
/*
Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
The delimiters represent a set of characters understood to separate words. The first string character
and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
Capitalization uses the Unicode title case, normally equivalent to upper case.
Parameters:
str - the string to capitalize
delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
Returns:
capitalized string
*/
func Capitalize (str string, delimiters ...rune) string {
var delimLen int
if delimiters == nil {
delimLen = -1
} else {
delimLen = len(delimiters)
}
if str == "" || delimLen == 0 {
return str;
}
buffer := []rune(str)
capitalizeNext := true
for i := 0; i < len(buffer); i++ {
ch := buffer[i]
if isDelimiter(ch, delimiters...) {
capitalizeNext = true
} else if capitalizeNext {
buffer[i] = unicode.ToTitle(ch)
capitalizeNext = false
}
}
return string(buffer)
}
/*
CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
Capitalization uses the Unicode title case, normally equivalent to upper case.
Parameters:
str - the string to capitalize fully
delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
Returns:
capitalized string
*/
func CapitalizeFully (str string, delimiters ...rune) string {
var delimLen int
if delimiters == nil {
delimLen = -1
} else {
delimLen = len(delimiters)
}
if str == "" || delimLen == 0 {
return str;
}
str = strings.ToLower(str)
return Capitalize(str, delimiters...);
}
/*
Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
Parameters:
str - the string to uncapitalize fully
delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
Returns:
uncapitalized string
*/
func Uncapitalize (str string, delimiters ...rune) string {
var delimLen int
if delimiters == nil {
delimLen = -1
} else {
delimLen = len(delimiters)
}
if str == "" || delimLen == 0 {
return str;
}
buffer := []rune(str)
uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
for i := 0; i < len(buffer); i++ {
ch := buffer[i]
if isDelimiter(ch, delimiters...) {
uncapitalizeNext = true
} else if uncapitalizeNext {
buffer[i] = unicode.ToLower(ch)
uncapitalizeNext = false
}
}
return string(buffer)
}
/*
SwapCase swaps the case of a string using a word based algorithm.
Conversion algorithm:
Upper case character converts to Lower case
Title case character converts to Lower case
Lower case character after Whitespace or at start converts to Title case
Other Lower case character converts to Upper case
Whitespace is defined by unicode.IsSpace(char).
Parameters:
str - the string to swap case
Returns:
the changed string
*/
func SwapCase(str string) string {
if str == "" {
return str
}
buffer := []rune(str)
whitespace := true
for i := 0; i < len(buffer); i++ {
ch := buffer[i]
if unicode.IsUpper(ch) {
buffer[i] = unicode.ToLower(ch)
whitespace = false
} else if unicode.IsTitle(ch) {
buffer[i] = unicode.ToLower(ch)
whitespace = false
} else if unicode.IsLower(ch) {
if whitespace {
buffer[i] = unicode.ToTitle(ch)
whitespace = false
} else {
buffer[i] = unicode.ToUpper(ch)
}
} else {
whitespace = unicode.IsSpace(ch)
}
}
return string(buffer);
}
/*
Initials extracts the initial letters from each word in the string. The first letter of the string and all first
letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
Parameters:
str - the string to get initials from
delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be the delimiter
Returns:
string of initial letters
*/
func Initials(str string, delimiters ...rune) string {
if str == "" {
return str
}
if delimiters != nil && len(delimiters) == 0 {
return ""
}
strLen := len(str)
var buf bytes.Buffer
lastWasGap := true
for i := 0; i < strLen; i++ {
ch := rune(str[i])
if isDelimiter(ch, delimiters...) {
lastWasGap = true
} else if lastWasGap {
buf.WriteRune(ch)
lastWasGap = false
}
}
return buf.String()
}
// private function (lower case func name)
func isDelimiter(ch rune, delimiters ...rune) bool {
if delimiters == nil {
return unicode.IsSpace(ch)
}
for _, delimiter := range delimiters {
if ch == delimiter {
return true
}
}
return false
}
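
For the word utilities above, a short illustrative sketch (the inputs are made up):

```go
package main

import (
	"fmt"

	"github.com/aokoli/goutils"
)

func main() {
	// Wrap breaks on spaces so that no line exceeds the requested width;
	// long words are left unwrapped by default.
	fmt.Println(goutils.Wrap("hyperkit instances are managed by the infrakit instance plugin", 24))

	// Initials keeps the first letter of each whitespace-separated word.
	fmt.Println(goutils.Initials("John Doe Foo")) // "JDF"
}
```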

vendor/github.com/docker/infrakit/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/docker/infrakit/README.md generated vendored Normal file

@ -0,0 +1,235 @@
InfraKit
========
[![Circle CI](https://circleci.com/gh/docker/infrakit.png?style=shield&circle-token=50d2063f283f98b7d94746416c979af3102275b5)](https://circleci.com/gh/docker/infrakit)
[![Go Report Card](https://goreportcard.com/badge/github.com/docker/infrakit)](https://goreportcard.com/report/github.com/docker/infrakit)
[![codecov.io](https://codecov.io/github/docker/infrakit/coverage.svg?branch=master&token=z08ZKeIJfA)](https://codecov.io/github/docker/infrakit?branch=master)
_InfraKit_ is a toolkit for creating and managing declarative, self-healing infrastructure.
It breaks infrastructure automation down into simple, pluggable components. These components work together to actively
ensure the infrastructure state matches the user's specifications.
Although _InfraKit_ emphasizes primitives for building self-healing infrastructure, it also can be used passively like
conventional tools.
To get started, try the [tutorial](docs/tutorial.md).
### Who InfraKit is for
_InfraKit_ is designed to support setup and management of base infrastructure. For example, it can help you manage a
system like a cluster or container orchestrator, ideally relieving you of building custom release and maintenance tools.
As a result, it is a low-level tool intended to be used by infrastructure operators directly or indirectly
(as a toolkit) through a higher-level tool. Since _InfraKit_ is pluggable, it allows you to manage resources in diverse
environments while using shared components and consistent interfaces.
## Plugins
_InfraKit_ makes extensive use of _Plugins_ to manage arbitrary systems in diverse environments, which can be composed
to meet different needs.
See the [plugins](docs/plugins) documentation for more details.
## Building
### Your Environment
Make sure you check out the project following a convention for building Go projects. For example,
```shell
# Install Go - https://golang.org/dl/
# Assuming your go compiler is in /usr/local/go
export PATH=/usr/local/go/bin:$PATH
# Your dev environment
mkdir -p ~/go
export GOPATH=!$
export PATH=$GOPATH/bin:$PATH
mkdir -p ~/go/src/github.com/docker
cd !$
git clone git@github.com:docker/infrakit.git
cd infrakit
```
We recommend Go version 1.7.1 or greater for all platforms.
Also install a few build tools:
```shell
make get-tools
```
### Running tests
```shell
$ make ci
```
### Binaries
```shell
$ make binaries
```
Executables will be placed in the `./build` directory.
This will produce binaries for tools and several reference Plugin implementations:
+ [`infrakit`](cmd/cli/README.md): a command line interface to interact with plugins
+ [`infrakit-group-default`](cmd/group/README.md): the default [Group plugin](./pkg/spi/group)
+ [`infrakit-instance-file`](examples/instance/file): an Instance plugin using dummy files to represent instances
+ [`infrakit-instance-terraform`](examples/instance/terraform):
an Instance plugin integrating [Terraform](https://www.terraform.io)
+ [`infrakit-instance-vagrant`](examples/instance/vagrant):
an Instance plugin using [Vagrant](https://www.vagrantup.com/)
+ [`infrakit-instance-maas`](examples/instance/maas):
an Instance plugin using [MaaS](https://maas.io)
+ [`infrakit-flavor-vanilla`](examples/flavor/vanilla):
a Flavor plugin for plain vanilla set up with user data and labels
+ [`infrakit-flavor-zookeeper`](examples/flavor/zookeeper):
a Flavor plugin for [Apache ZooKeeper](https://zookeeper.apache.org/) ensemble members
+ [`infrakit-flavor-swarm`](examples/flavor/swarm):
a Flavor plugin for Docker in [Swarm mode](https://docs.docker.com/engine/swarm/).
All provided binaries have a `help` sub-command to get usage and a `version` sub-command to identify the build revision.
# Design
## Configuration
_InfraKit_ uses JSON for configuration because it is composable and a widely accepted format for many
infrastructure SDKs and tools. Since the system is highly component-driven, our JSON format follows
simple patterns to support the composition of components.
A common pattern for a JSON object looks like this:
```json
{
"SomeKey": "ValueForTheKey",
"Properties": {
}
}
```
There is only one `Properties` field in this JSON and its value is a JSON object. The opaque
JSON value for `Properties` is decoded via the Go `Spec` struct defined within the plugin's package,
for example [`vanilla.Spec`](pkg/plugin/flavor/vanilla/flavor.go).
The JSON above is a _value_, but the type of the value belongs outside the structure. For example, the
default Group [Spec](pkg/plugin/group/types/types.go) is composed of an Instance plugin, a Flavor plugin, and an
Allocation:
```json
{
"ID": "name-of-the-group",
"Properties": {
"Allocation": {
},
"Instance": {
"Plugin": "name-of-the-instance-plugin",
"Properties": {
}
},
"Flavor": {
"Plugin": "name-of-the-flavor-plugin",
"Properties": {
}
}
}
}
```
The group's Spec has `Instance` and `Flavor` fields which are used to indicate the type, and the value of the
fields follow the pattern of `<some_key>` and `Properties` as shown above.
The `Allocation` determines how the Group is managed. Allocation has two properties:
- `Size`: an integer for the number of instances to maintain in the Group
- `LogicalIDs`: a list of string identifiers, one will be associated with each Instance
Exactly one of these fields must be set, which defines whether the Group is treated as 'cattle' (`Size`) or 'pets'
(`LogicalIDs`). It is up to the Instance and Flavor plugins to determine how to use `LogicalID` values.
As an example, if you wanted to manage a Group of NGINX servers, you could
write a custom Group plugin for ultimate customization. The most concise configuration looks something like this:
```json
{
"ID": "nginx",
"Plugin": "my-nginx-group-plugin",
"Properties": {
"port": 8080
}
}
```
However, you would likely prefer to use the default Group plugin and implement a Flavor plugin to focus on
application-specific behavior. This gives you immediate support for any infrastructure that has an Instance plugin.
Your resulting configuration might look something like this:
```json
{
"ID": "nginx",
"Plugin": "group",
"Properties": {
"Allocation": {
"Size": 10
},
"Instance": {
"Plugin": "aws",
"Properties": {
"region": "us-west-2",
"ami": "ami-123456"
}
},
"Flavor": {
"Plugin": "nginx",
"Properties": {
"port": 8080
}
}
}
}
```
Once the configuration is ready, you will tell a Group plugin to
+ watch it
+ update it
+ destroy it
Watching the group as specified in the configuration means that the Group plugin will create
the instances if they don't already exist. New instances will be created if for any reason
existing instances have disappeared such that the state doesn't match your specifications.
Updating the group tells the Group plugin that your configuration may have changed. It will
then determine the changes necessary to ensure the state of the infrastructure matches the new
specification.
## Docs
Additional documentation can be found [here](docs).
## Reporting security issues
The maintainers take security seriously. If you discover a security issue,
please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts—if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
## Design goals
_InfraKit_ is currently focused on supporting setup and management of base infrastructure, such as a cluster
orchestrator. The image below illustrates an architecture we are working towards supporting - a Docker cluster in Swarm
mode.
![arch image](docs/images/arch.png)
This configuration co-locates _InfraKit_ with Swarm manager nodes and offers high availability of _InfraKit_ itself and
Swarm managers (using attached storage). _InfraKit_ is shown managing two groups - managers and workers that will be
continuously monitored, and may be modified with rolling updates.
Countless configurations are possible with _InfraKit_, but we believe achieving support for this configuration will
enable a large number of real-world use cases.
## Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved. Released under the Apache 2.0
license. See [LICENSE](LICENSE) for the full license text.

View File

@ -0,0 +1,74 @@
package server
import (
"fmt"
"net/http"
"strings"
log "github.com/Sirupsen/logrus"
)
// Interceptor implements http Handler and is used to intercept incoming requests to subscribe
// to topics and perform some validations before allowing the subscription to be established.
// Tasks that the interceptor can do include authn and authz, topic validation, etc.
type Interceptor struct {
// Do is the body of the http handler -- required
Do http.HandlerFunc
// Pre is called to before actual subscription to a topic happens.
// This is the hook where validation and authentication / authorization checks happen.
Pre func(topic string, headers map[string][]string) error
// Post is called when the client disconnects. This is optional.
Post func(topic string)
}
// ServeHTTP calls the before and after subscribe methods.
func (i *Interceptor) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
topic := clean(req.URL.Query().Get("topic"))
// need to strip out the / because it was added by the client
if strings.Index(topic, "/") == 0 {
topic = topic[1:]
}
err := i.Pre(topic, req.Header)
if err != nil {
log.Warningln("Error:", err)
http.Error(rw, err.Error(), getStatusCode(err))
return
}
i.Do.ServeHTTP(rw, req)
if i.Post != nil {
i.Post(topic)
}
}
func getStatusCode(e error) int {
switch e.(type) {
case ErrInvalidTopic:
return http.StatusNotFound
case ErrNotAuthorized:
return http.StatusUnauthorized
default:
return http.StatusInternalServerError
}
}
// ErrInvalidTopic is the error raised when topic is invalid
type ErrInvalidTopic string
func (e ErrInvalidTopic) Error() string {
return fmt.Sprintf("invalid topic: %s", string(e))
}
// ErrNotAuthorized is the error raised when the user isn't authorized
type ErrNotAuthorized string
func (e ErrNotAuthorized) Error() string {
return fmt.Sprintf("not authorized: %s", string(e))
}
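
As a rough sketch (not part of the vendored sources), the interceptor can be wrapped around the broker's SSE handler like this; the import path, listen address, and the topic policy in `Pre` are assumptions for illustration:

```go
package main

import (
	"net/http"
	"strings"

	"github.com/docker/infrakit/pkg/broker/server"
)

func main() {
	broker := server.NewBroker()

	// Validate every subscription before handing the request to the broker.
	handler := &server.Interceptor{
		Do: broker.ServeHTTP,
		Pre: func(topic string, headers map[string][]string) error {
			// Hypothetical policy: only topics under "instance/" may be subscribed to.
			if !strings.HasPrefix(topic, "instance/") {
				return server.ErrInvalidTopic(topic)
			}
			return nil
		},
		Post: func(topic string) {
			// Optional hook: runs after the client disconnects.
		},
	}

	http.ListenAndServe(":8080", handler)
}
```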

View File

@ -0,0 +1,27 @@
package server
import (
"net"
"net/http"
)
// ListenAndServeOnSocket starts a minimal server (mostly for testing) listening at the given unix socket path.
func ListenAndServeOnSocket(socketPath string, optionalURLPattern ...string) (*Broker, error) {
urlPattern := "/"
if len(optionalURLPattern) > 0 {
urlPattern = optionalURLPattern[0]
}
listener, err := net.Listen("unix", socketPath)
if err != nil {
return nil, err
}
broker := NewBroker()
mux := http.NewServeMux()
mux.Handle(urlPattern, broker)
httpServer := &http.Server{
Handler: mux,
}
go httpServer.Serve(listener)
return broker, nil
}

View File

@ -0,0 +1,266 @@
package server
import (
"bytes"
"fmt"
"net/http"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/armon/go-radix"
"github.com/docker/infrakit/pkg/types"
)
// the amount of time to wait when pushing a message to
// a slow client or a client that closed after `range clients` started.
const patience time.Duration = time.Second * 1
type subscription struct {
topic string
exactMatch bool
ch chan []byte
}
type event struct {
topic string
data []byte
}
// Broker is the event message broker
type Broker struct {
// Close this to stop
stop chan struct{}
// Close this to stop the run loop
finish chan struct{}
// Events are pushed to this channel by the main events-gathering routine
notifier chan *event
// New client connections
newClients chan subscription
// Closed client connections
closingClients chan subscription
// Client connections registry
clients *radix.Tree
// how many clients
count int
}
// NewBroker returns an instance of the broker
func NewBroker() *Broker {
b := &Broker{
stop: make(chan struct{}),
finish: make(chan struct{}),
notifier: make(chan *event, 1),
newClients: make(chan subscription),
closingClients: make(chan subscription),
clients: radix.New(),
}
go b.run()
return b
}
// Stop stops the broker and exits the goroutine
func (b *Broker) Stop() {
close(b.stop)
}
func clean(topic string) string {
if len(topic) == 0 || topic == "." {
return "/"
}
if topic[0] != '/' {
return "/" + topic
}
return topic
}
// checkExactMatch returns true if the topic does not end with `/`, i.e. the subscription requires an exact topic match
func checkExactMatch(topic string) bool {
return strings.LastIndex(topic, "/") != len(topic)-1
}
// Publish publishes a message at the topic
func (b *Broker) Publish(topic string, data interface{}, optionalTimeout ...time.Duration) error {
any, err := types.AnyValue(data)
if err != nil {
return err
}
topic = clean(topic)
if len(optionalTimeout) > 0 {
select {
case b.notifier <- &event{topic: topic, data: any.Bytes()}:
case <-time.After(optionalTimeout[0]):
return fmt.Errorf("timeout sending %v", topic)
}
} else {
b.notifier <- &event{topic: topic, data: any.Bytes()}
}
return nil
}
// ServeHTTP implements the HTTP handler
func (b *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
defer func() {
if v := recover(); v != nil {
log.Warningln("broker.ServeHTTP recovered:", v)
}
}()
topic := clean(req.URL.Query().Get("topic"))
// flusher is required for streaming
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
// Each connection registers its own message channel with the Broker's connections registry
messageChan := make(chan []byte)
// Signal the broker that we have a new connection
b.newClients <- subscription{topic: topic, exactMatch: checkExactMatch(topic), ch: messageChan}
// Remove this client from the map of connected clients
// when this handler exits.
defer func() {
b.closingClients <- subscription{topic: topic, ch: messageChan}
}()
// Listen to connection close and un-register messageChan
notify := rw.(http.CloseNotifier).CloseNotify()
for {
select {
case <-b.stop:
close(b.finish)
return
case <-notify:
return
default:
// Write to the ResponseWriter
// Server Sent Events compatible
fmt.Fprintf(rw, "data: %s\n\n", <-messageChan)
// Flush the data immediately instead of buffering it for later.
flusher.Flush()
}
}
}
func (b *Broker) run() {
for {
select {
case <-b.finish:
// Disconnect all clients
b.clients.Walk(
func(key string, value interface{}) bool {
chset, ok := value.(map[chan []byte]bool)
if !ok {
panic("assert-failed")
}
for ch := range chset {
log.Infoln("Closing client connection at", key, "ch=", ch)
close(ch)
}
return false
})
log.Infoln("Broker finished")
return
case subscription := <-b.newClients:
// A new client has connected.
// Register their message channel
subs := map[chan []byte]bool{subscription.ch: subscription.exactMatch}
v, has := b.clients.Get(subscription.topic)
if has {
if v, ok := v.(map[chan []byte]bool); !ok {
panic("assert-failed: not a map of channels")
} else {
v[subscription.ch] = subscription.exactMatch
subs = v
}
}
b.clients.Insert(subscription.topic, subs)
b.count++
log.Infof("Connected: topic=%s => %d registered clients, ch=%v", subscription.topic, b.count, subscription.ch)
case subscription := <-b.closingClients:
// A client has detached and we want to stop sending messages
if v, has := b.clients.Get(subscription.topic); has {
if subs, ok := v.(map[chan []byte]bool); !ok {
panic("assert-failed: not a map of channels")
} else {
delete(subs, subscription.ch)
if len(subs) == 0 {
b.clients.Delete(subscription.topic)
} else {
b.clients.Insert(subscription.topic, subs)
}
b.count--
log.Infof("Disconnected: topic=%s => %d registered clients, ch=%v", subscription.topic, b.count, subscription.ch)
}
}
case event, open := <-b.notifier:
if !open {
log.Infoln("Stopping broker")
return
}
// Remove any \n because it's meaningful in SSE spec.
// We could use base64 encoding, but it hurts interoperability with browser / JavaScript clients.
data := bytes.Replace(event.data, []byte("\n"), nil, -1)
b.clients.WalkPath(event.topic,
func(key string, value interface{}) bool {
chset, ok := value.(map[chan []byte]bool)
if !ok {
panic("assert-failed")
}
for ch, exact := range chset {
if exact && event.topic != key {
return false
}
select {
case ch <- data:
case <-time.After(patience):
log.Print("Skipping client.")
}
}
return false
})
}
}
}
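
A minimal end-to-end sketch of the broker (the import path, socket path, and topic names are assumptions): serve the SSE endpoint on a unix socket, then publish events. Subscribers of the exact topic `instance/create`, or of the prefix topic `instance/`, would receive them:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/infrakit/pkg/broker/server"
)

func main() {
	// Serve Server-Sent Events on a unix socket; clients subscribe via ?topic=...
	broker, err := server.ListenAndServeOnSocket("/tmp/events.sock")
	if err != nil {
		panic(err)
	}
	defer broker.Stop()

	// Publish a few events with a one-second send timeout.
	for i := 0; i < 10; i++ {
		if err := broker.Publish("instance/create", map[string]interface{}{"seq": i}, time.Second); err != nil {
			fmt.Println("publish failed:", err)
		}
		time.Sleep(time.Second)
	}
}
```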

16
vendor/github.com/docker/infrakit/pkg/cli/logging.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package cli
import log "github.com/Sirupsen/logrus"
// DefaultLogLevel is the default log level value.
var DefaultLogLevel = len(log.AllLevels) - 2
// SetLogLevel adjusts the logrus level.
func SetLogLevel(level int) {
if level > len(log.AllLevels)-1 {
level = len(log.AllLevels) - 1
} else if level < 0 {
level = 0
}
log.SetLevel(log.AllLevels[level])
}

View File

@ -0,0 +1,47 @@
package cli
import (
"fmt"
"io/ioutil"
"os"
"path"
log "github.com/Sirupsen/logrus"
"github.com/docker/infrakit/pkg/discovery"
"github.com/docker/infrakit/pkg/rpc/server"
)
// EnsureDirExists makes sure the directory where the socket file will be placed exists.
func EnsureDirExists(dir string) {
os.MkdirAll(dir, 0700)
}
// RunPlugin runs a plugin server, advertising with the provided name for discovery.
// The plugin should conform to the rpc call convention as implemented in the rpc package.
func RunPlugin(name string, plugin server.VersionedInterface, more ...server.VersionedInterface) {
dir := discovery.Dir()
EnsureDirExists(dir)
socketPath := path.Join(dir, name)
pidPath := path.Join(dir, name+".pid")
stoppable, err := server.StartPluginAtPath(socketPath, plugin, more...)
if err != nil {
log.Error(err)
}
// write PID file
err = ioutil.WriteFile(pidPath, []byte(fmt.Sprintf("%v", os.Getpid())), 0644)
if err != nil {
log.Error(err)
}
log.Infoln("PID file at", pidPath)
if stoppable != nil {
stoppable.AwaitStopped()
}
// clean up
os.Remove(pidPath)
log.Infoln("Removed PID file at", pidPath)
}

45
vendor/github.com/docker/infrakit/pkg/cli/version.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package cli
import (
"fmt"
"github.com/spf13/cobra"
)
var (
// Version is the build release identifier.
Version = "Unspecified"
// Revision is the build source control revision.
Revision = "Unspecified"
)
var info = map[string]map[string]interface{}{}
// RegisterInfo allows any package that uses this to register additional information to be displayed by the command.
// For example, a swarm flavor could register the docker api version. This allows us to selectively incorporate
// only required dependencies based on package registration (in their init()) without explicitly pulling unused
// dependencies.
func RegisterInfo(key string, data map[string]interface{}) {
info[key] = data
}
// VersionCommand creates a cobra Command that prints build version information.
func VersionCommand() *cobra.Command {
return &cobra.Command{
Use: "version",
Short: "print build version information",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("\n%-24s: %v", "Version", Version)
fmt.Printf("\n%-24s: %v", "Revision", Revision)
for k, m := range info {
fmt.Printf("\n\n%s", k)
for kk, vv := range m {
fmt.Printf("\n%-24s: %v", kk, vv)
}
fmt.Printf("\n")
}
fmt.Println()
},
}
}
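
For illustration (the command name and the registered info are assumptions), a plugin binary would typically mount this sub-command on its cobra root command and register any extra build metadata it wants printed:

```go
package main

import (
	"github.com/docker/infrakit/pkg/cli"
	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "infrakit-instance-hyperkit"}

	// Extra key/value info shows up under its own heading in the `version` output.
	cli.RegisterInfo("hyperkit", map[string]interface{}{"driver": "hyperkit"})

	root.AddCommand(cli.VersionCommand())
	root.Execute()
}
```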

106
vendor/github.com/docker/infrakit/pkg/discovery/dir.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package discovery
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
log "github.com/Sirupsen/logrus"
"github.com/docker/infrakit/pkg/plugin"
)
type errNotUnixSocket string
func (e errNotUnixSocket) Error() string {
return string(e)
}
// IsErrNotUnixSocket returns true if the error is due to the file not being a valid unix socket.
func IsErrNotUnixSocket(e error) bool {
_, is := e.(errNotUnixSocket)
return is
}
type dirPluginDiscovery struct {
dir string
lock sync.Mutex
}
// Find returns a plugin by name
func (r *dirPluginDiscovery) Find(name plugin.Name) (*plugin.Endpoint, error) {
lookup, _ := name.GetLookupAndType()
plugins, err := r.List()
if err != nil {
return nil, err
}
p, exists := plugins[lookup]
if !exists {
return nil, fmt.Errorf("Plugin not found: %s (looked up using %s)", name, lookup)
}
return p, nil
}
// newDirPluginDiscovery creates a registry instance with the given file directory path.
func newDirPluginDiscovery(dir string) (*dirPluginDiscovery, error) {
d := &dirPluginDiscovery{dir: dir}
// Perform a dummy read to catch obvious issues early (such as the directory not existing).
_, err := d.List()
return d, err
}
func (r *dirPluginDiscovery) dirLookup(entry os.FileInfo) (*plugin.Endpoint, error) {
if entry.Mode()&os.ModeSocket != 0 {
socketPath := filepath.Join(r.dir, entry.Name())
return &plugin.Endpoint{
Protocol: "unix",
Address: socketPath,
Name: entry.Name(),
}, nil
}
return nil, errNotUnixSocket(fmt.Sprintf("File is not a socket: %s", entry))
}
// List returns a list of plugins known, keyed by the name
func (r *dirPluginDiscovery) List() (map[string]*plugin.Endpoint, error) {
r.lock.Lock()
defer r.lock.Unlock()
log.Debugln("Opening:", r.dir)
entries, err := ioutil.ReadDir(r.dir)
if err != nil {
return nil, err
}
plugins := map[string]*plugin.Endpoint{}
for _, entry := range entries {
if !entry.IsDir() {
instance, err := r.dirLookup(entry)
if err != nil {
if !IsErrNotUnixSocket(err) {
log.Warningln("Loading plugin err=", err)
}
continue
}
if instance == nil {
log.Warningln("Plugin is nil")
continue
}
log.Debugln("Discovered plugin at", instance.Address)
plugins[instance.Name] = instance
}
}
return plugins, nil
}

View File

@ -0,0 +1,60 @@
package discovery
import (
"fmt"
"os"
"os/user"
"path"
"github.com/docker/infrakit/pkg/plugin"
)
// Plugins provides access to plugin discovery.
type Plugins interface {
// Find looks up the plugin by name. The name can be of the form $lookup[/$subtype]. See GetLookupAndType().
Find(name plugin.Name) (*plugin.Endpoint, error)
List() (map[string]*plugin.Endpoint, error)
}
const (
// PluginDirEnvVar is the environment variable that may be used to customize the plugin discovery path.
PluginDirEnvVar = "INFRAKIT_PLUGINS_DIR"
)
// Dir returns the directory to use for plugin discovery, which may be customized by the environment.
func Dir() string {
if pluginDir := os.Getenv(PluginDirEnvVar); pluginDir != "" {
return pluginDir
}
home := os.Getenv("HOME")
if usr, err := user.Current(); err == nil {
home = usr.HomeDir
}
return path.Join(home, ".infrakit/plugins")
}
// NewPluginDiscovery creates a plugin discovery based on the environment configuration.
func NewPluginDiscovery() (Plugins, error) {
return NewPluginDiscoveryWithDirectory(Dir())
}
// NewPluginDiscoveryWithDirectory creates a plugin discovery based on the directory given.
func NewPluginDiscoveryWithDirectory(pluginDir string) (Plugins, error) {
stat, err := os.Stat(pluginDir)
if err == nil {
if !stat.IsDir() {
return nil, fmt.Errorf("Plugin dir %s is a file", pluginDir)
}
} else {
if os.IsNotExist(err) {
if err := os.MkdirAll(pluginDir, 0700); err != nil {
return nil, fmt.Errorf("Failed to create plugin dir %s: %s", pluginDir, err)
}
} else {
return nil, fmt.Errorf("Failed to access plugin dir %s: %s", pluginDir, err)
}
}
return newDirPluginDiscovery(pluginDir)
}
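
A short sketch of how a client might use the discovery package (the plugin name looked up at the end is an assumption for illustration):

```go
package main

import (
	"fmt"

	"github.com/docker/infrakit/pkg/discovery"
	"github.com/docker/infrakit/pkg/plugin"
)

func main() {
	// Uses $INFRAKIT_PLUGINS_DIR if set, otherwise ~/.infrakit/plugins.
	plugins, err := discovery.NewPluginDiscovery()
	if err != nil {
		panic(err)
	}

	// List every unix-socket plugin endpoint that was discovered.
	all, err := plugins.List()
	if err != nil {
		panic(err)
	}
	for name, endpoint := range all {
		fmt.Println(name, "->", endpoint.Address)
	}

	// "instance-hyperkit/json" looks up the socket "instance-hyperkit"; "json" is the sub-type.
	if ep, err := plugins.Find(plugin.Name("instance-hyperkit/json")); err == nil {
		fmt.Println("found:", ep.Protocol, ep.Address)
	}
}
```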

View File

@ -0,0 +1,30 @@
package metadata
import (
"path/filepath"
"strings"
"github.com/docker/infrakit/pkg/spi/metadata"
)
// Path returns the path components of a / separated path
func Path(path string) metadata.Path {
return metadata.Path(strings.Split(filepath.Clean(path), "/"))
}
// PathFromStrings returns the path from a list of strings
func PathFromStrings(a string, b ...string) metadata.Path {
if a != "" {
return metadata.Path(append([]string{a}, b...))
}
return metadata.Path(b)
}
// String returns the string representation of path
func String(p metadata.Path) string {
s := strings.Join([]string(p), "/")
if len(s) == 0 {
return "."
}
return s
}

View File

@ -0,0 +1,90 @@
package metadata
import (
log "github.com/Sirupsen/logrus"
"github.com/docker/infrakit/pkg/spi/metadata"
"github.com/docker/infrakit/pkg/types"
)
// NewPluginFromData creates a plugin out of a simple data map. Note that updates to the map
// are not guarded or synchronized with reads.
func NewPluginFromData(data map[string]interface{}) metadata.Plugin {
return &plugin{data: data}
}
// NewPluginFromChannel returns a plugin implementation where reads and writes are serialized
// via channel of functions that have a view to the metadata. Closing the write channel stops
// the serialized read/writes and falls back to unserialized reads.
func NewPluginFromChannel(writes <-chan func(map[string]interface{})) metadata.Plugin {
readChan := make(chan func(map[string]interface{}))
p := &plugin{reads: readChan}
go func() {
defer func() {
if r := recover(); r != nil {
log.Warningln("Plugin stopped:", r)
}
}()
data := map[string]interface{}{}
for {
select {
case writer, open := <-writes:
if !open {
close(readChan)
p.reads = nil
return
}
writer(data)
case reader := <-p.reads:
copy := data
reader(copy)
}
}
}()
return p
}
type plugin struct {
data map[string]interface{}
reads chan func(data map[string]interface{})
}
// List returns a list of *child nodes* given a path, which is specified as a slice
// where for i < j, path[i] is the parent of path[j]
func (p *plugin) List(path metadata.Path) ([]string, error) {
if p.reads == nil && p.data != nil {
return List(path, p.data), nil
}
children := make(chan []string)
p.reads <- func(data map[string]interface{}) {
children <- List(path, data)
return
}
return <-children, nil
}
// Get retrieves the value at path given.
func (p *plugin) Get(path metadata.Path) (*types.Any, error) {
if p.reads == nil && p.data != nil {
return types.AnyValue(Get(path, p.data))
}
value := make(chan *types.Any)
err := make(chan error)
p.reads <- func(data map[string]interface{}) {
v, e := types.AnyValue(Get(path, data))
value <- v
err <- e
return
}
return <-value, <-err
}
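
A minimal sketch of the data-map backed plugin together with the path helpers above; the `pkg/plugin/metadata` import path and the sample data are assumptions:

```go
package main

import (
	"fmt"

	plugin_metadata "github.com/docker/infrakit/pkg/plugin/metadata"
)

func main() {
	// Back the metadata plugin with a static, read-only map.
	p := plugin_metadata.NewPluginFromData(map[string]interface{}{
		"vm": map[string]interface{}{
			"cpus":   2,
			"memory": 1024,
		},
	})

	// List the child nodes under "vm" -> [cpus memory].
	children, _ := p.List(plugin_metadata.Path("vm"))
	fmt.Println(children)

	// Get a leaf value as a serialized blob.
	if blob, err := p.Get(plugin_metadata.Path("vm/cpus")); err == nil {
		fmt.Println(string(blob.Bytes()))
	}
}
```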

View File

@ -0,0 +1,230 @@
package metadata
import (
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"github.com/docker/infrakit/pkg/types"
)
var (
indexRoot = "\\[(([+|-]*[0-9]+)|((.*)=(.*)))\\]$"
arrayIndexExp = regexp.MustCompile("(.*)" + indexRoot)
indexExp = regexp.MustCompile("^" + indexRoot)
)
// Put sets the attribute of an object at path to the given value
func Put(path []string, value interface{}, object map[string]interface{}) bool {
return put(path, value, object)
}
// Get returns the attribute of the object at path
func Get(path []string, object interface{}) interface{} {
return get(path, object)
}
// GetValue returns the attribute of the object at path, as serialized blob
func GetValue(path []string, object interface{}) (*types.Any, error) {
if any, is := object.(*types.Any); is {
return any, nil
}
return types.AnyValue(Get(path, object))
}
// List lists the members at the path
func List(path []string, object interface{}) []string {
list := []string{}
v := get(path, object)
if v == nil {
return list
}
val := reflect.Indirect(reflect.ValueOf(v))
if any, is := v.(*types.Any); is {
var temp interface{}
if err := any.Decode(&temp); err == nil {
val = reflect.ValueOf(temp)
}
}
switch val.Kind() {
case reflect.Slice:
// this is a slice, so return the name as '[%d]'
for i := 0; i < val.Len(); i++ {
list = append(list, fmt.Sprintf("[%d]", i))
}
case reflect.Map:
for _, k := range val.MapKeys() {
list = append(list, k.String())
}
case reflect.Struct:
vt := val.Type()
for i := 0; i < vt.NumField(); i++ {
if vt.Field(i).PkgPath == "" {
list = append(list, vt.Field(i).Name)
}
}
}
sort.Strings(list)
return list
}
func put(p []string, value interface{}, store map[string]interface{}) bool {
if len(p) == 0 {
return false
}
key := p[0]
if key == "" {
return put(p[1:], value, store)
}
// check if key is an array index of the form <1>[<2>]
matches := arrayIndexExp.FindStringSubmatch(key)
if len(matches) > 2 && matches[1] != "" {
key = matches[1]
p = append([]string{key, fmt.Sprintf("[%s]", matches[2])}, p[1:]...)
return put(p, value, store)
}
s := reflect.Indirect(reflect.ValueOf(store))
switch s.Kind() {
case reflect.Slice:
return false // not supported
case reflect.Map:
if reflect.TypeOf(p[0]).AssignableTo(s.Type().Key()) {
m := s.MapIndex(reflect.ValueOf(p[0]))
if !m.IsValid() {
m = reflect.ValueOf(map[string]interface{}{})
s.SetMapIndex(reflect.ValueOf(p[0]), m)
}
if len(p) > 1 {
return put(p[1:], value, m.Interface().(map[string]interface{}))
}
s.SetMapIndex(reflect.ValueOf(p[0]), reflect.ValueOf(value))
return true
}
}
return false
}
func get(path []string, object interface{}) (value interface{}) {
if f, is := object.(func() interface{}); is {
object = f()
}
if len(path) == 0 {
return object
}
if any, is := object.(*types.Any); is {
var temp interface{}
if err := any.Decode(&temp); err == nil {
return get(path, temp)
}
return nil
}
key := path[0]
switch key {
case ".":
return object
case "":
return get(path[1:], object)
}
// check if key is an array index of the form <1>[<2>]
matches := arrayIndexExp.FindStringSubmatch(key)
if len(matches) > 2 && matches[1] != "" {
key = matches[1]
path = append([]string{key, fmt.Sprintf("[%s]", matches[2])}, path[1:]...)
return get(path, object)
}
v := reflect.Indirect(reflect.ValueOf(object))
switch v.Kind() {
case reflect.Slice:
i := 0
matches = indexExp.FindStringSubmatch(key)
if len(matches) > 0 {
if matches[2] != "" {
// numeric index
if index, err := strconv.Atoi(matches[1]); err == nil {
switch {
case index >= 0 && v.Len() > index:
i = index
case index < 0 && v.Len() > -index: // negative index like python
i = v.Len() + index
}
}
return get(path[1:], v.Index(i).Interface())
} else if matches[3] != "" {
// equality search index for 'field=check'
lhs := matches[4] // supports another select expression for extracting deeply from the struct
rhs := matches[5]
// loop through the array looking for field that matches the check value
for j := 0; j < v.Len(); j++ {
if el := get(tokenize(lhs), v.Index(j).Interface()); el != nil {
if fmt.Sprintf("%v", el) == rhs {
return get(path[1:], v.Index(j).Interface())
}
}
}
}
}
case reflect.Map:
value := v.MapIndex(reflect.ValueOf(key))
if value.IsValid() {
return get(path[1:], value.Interface())
}
case reflect.Struct:
fv := v.FieldByName(key)
if !fv.IsValid() {
return nil
}
if !fv.CanInterface() {
return nil
}
return get(path[1:], fv.Interface())
}
return nil
}
// With quoting to support Azure RM type names, e.g. Microsoft.Network/virtualNetworks
// This will split a string like /Resources/'Microsoft.Network/virtualNetworks'/managerSubnet/Name into
// [ , Resources, Microsoft.Network/virtualNetworks, managerSubnet, Name]
func tokenize(s string) []string {
if len(s) == 0 {
return []string{}
}
a := []string{}
start := 0
quoted := false
for i := 0; i < len(s); i++ {
switch s[i] {
case '/':
if !quoted {
a = append(a, strings.Replace(s[start:i], "'", "", -1))
start = i + 1
}
case '\'':
quoted = !quoted
}
}
if start < len(s)-1 {
a = append(a, strings.Replace(s[start:], "'", "", -1))
}
return a
}
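
A sketch of the path microformat handled by `Get` and `Put` above, including the `[index]` and `[field=value]` selectors for slices (the import path and sample data are assumptions):

```go
package main

import (
	"fmt"

	plugin_metadata "github.com/docker/infrakit/pkg/plugin/metadata"
)

func main() {
	object := map[string]interface{}{}

	// Put builds up nested maps along the path.
	plugin_metadata.Put([]string{"cluster", "name"}, "demo", object)
	plugin_metadata.Put([]string{"cluster", "size"}, 3, object)
	fmt.Println(plugin_metadata.Get([]string{"cluster", "name"}, object)) // demo

	// Slices can be indexed numerically or by an equality match on a field.
	hosts := map[string]interface{}{
		"hosts": []interface{}{
			map[string]interface{}{"id": "a", "ip": "192.168.1.10"},
			map[string]interface{}{"id": "b", "ip": "192.168.1.11"},
		},
	}
	fmt.Println(plugin_metadata.Get([]string{"hosts[1]", "ip"}, hosts))    // 192.168.1.11
	fmt.Println(plugin_metadata.Get([]string{"hosts[id=b]", "ip"}, hosts)) // 192.168.1.11
}
```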

45
vendor/github.com/docker/infrakit/pkg/plugin/name.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package plugin
import (
"fmt"
"strings"
)
// Name is a reference to the plugin. Places where it appears include JSON files as type of field `Plugin`.
type Name string
// GetLookupAndType returns the plugin name for lookup and sub-type supported by the plugin.
// The name follows a microformat of $plugin[/$subtype] where $plugin is used for the discovery / lookup by name.
// The $subtype is used for the Type parameter in the RPC requests.
// Example: instance-file/json means lookup socket file 'instance-file' and the type is 'json'.
func (r Name) GetLookupAndType() (string, string) {
name := string(r)
if first := strings.Index(name, "/"); first >= 0 {
return name[0:first], name[first+1:]
}
return name, ""
}
// String returns the string representation
func (r Name) String() string {
return string(r)
}
// MarshalJSON implements the JSON marshaler interface
func (r Name) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, r.String())), nil
}
// UnmarshalJSON implements the JSON unmarshaler interface
func (r *Name) UnmarshalJSON(data []byte) error {
str := string(data)
start := strings.Index(str, "\"")
last := strings.LastIndex(str, "\"")
if start == 0 && last == len(str)-1 {
str = str[start+1 : last]
} else {
return fmt.Errorf("bad-format-for-name:%v", string(data))
}
*r = Name(str)
return nil
}

97
vendor/github.com/docker/infrakit/pkg/plugin/plugin.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package plugin
import (
"github.com/docker/infrakit/pkg/spi"
"github.com/docker/infrakit/pkg/template"
"github.com/docker/infrakit/pkg/types"
)
// Spec models a canonical pattern of fields that exist in a struct/ map / union that indicates the block is a plugin.
type Spec struct {
// Plugin is the name of the plugin
Plugin Name
// Properties is the configuration of the plugin
Properties *types.Any
}
// Informer is the interface that gives information about the plugin such as version and interface methods
type Informer interface {
// GetInfo returns metadata about the plugin
GetInfo() (Info, error)
// GetFunctions returns metadata about the plugin's template functions, if the plugin supports templating.
GetFunctions() (map[string][]template.Function, error)
}
// Info is metadata for the plugin
type Info struct {
// Vendor captures vendor-specific information about this plugin
Vendor *spi.VendorInfo
// Implements is a list of plugin interface and versions this plugin supports
Implements []spi.InterfaceSpec
// Interfaces (optional) is a slice of interface descriptions by the type and version
Interfaces []InterfaceDescription `json:",omitempty"`
}
// InterfaceDescription is a holder for RPC interface version and method descriptions
type InterfaceDescription struct {
spi.InterfaceSpec
Methods []MethodDescription
}
// MethodDescription contains information about the RPC method such as the request and response
// example structs. The request value can be used as an example input, possibly with example
// plugin-custom properties if the underlying plugin implements the InputExample interface.
// The response value gives an example of the expected response.
type MethodDescription struct {
// Request is the RPC request example
Request Request
// Response is the RPC response example
Response Response
}
// Request models the RPC request payload
type Request struct {
// Version is the version of the JSON RPC protocol
Version string `json:"jsonrpc"`
// Method is the rpc method to use in the payload field 'method'
Method string `json:"method"`
// Params contains example inputs. This can be a zero value struct or one with defaults
Params interface{} `json:"params"`
// ID is the request ID
ID string `json:"id"`
}
// Response is the RPC response struct
type Response struct {
// Result is the result of the call
Result interface{} `json:"result"`
// ID is the id matching the request ID
ID string `json:"id"`
}
// Endpoint is the address of the plugin service
type Endpoint struct {
// Name is the key used to refer to this plugin in all JSON configs
Name string
// Protocol is the transport protocol -- unix, tcp, etc.
Protocol string
// Address is the how to connect - socket file, host:port, etc.
Address string
}

View File

@ -0,0 +1,76 @@
package client
import (
"bytes"
log "github.com/Sirupsen/logrus"
"github.com/docker/infrakit/pkg/spi"
"github.com/gorilla/rpc/v2/json2"
"net"
"net/http"
"net/http/httputil"
"sync"
)
type client struct {
http http.Client
addr string
}
// New creates a new Client that communicates with a unix socket and validates the remote API.
func New(socketPath string, api spi.InterfaceSpec) (Client, error) {
dialUnix := func(proto, addr string) (conn net.Conn, err error) {
return net.Dial("unix", socketPath)
}
unvalidatedClient := &client{addr: socketPath, http: http.Client{Transport: &http.Transport{Dial: dialUnix}}}
cl := &handshakingClient{client: unvalidatedClient, iface: api, lock: &sync.Mutex{}}
// check handshake
if err := cl.handshake(); err != nil {
// Note - we still return the client with the possibility of doing a handshake later on
// if we provide an api for the plugin to recheck later. This way, individual components
// can stay running and recalibrate themselves after the user has corrected the problems.
return cl, err
}
return cl, nil
}
func (c client) Addr() string {
return c.addr
}
func (c client) Call(method string, arg interface{}, result interface{}) error {
message, err := json2.EncodeClientRequest(method, arg)
if err != nil {
return err
}
req, err := http.NewRequest("POST", "http://a/", bytes.NewReader(message))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
requestData, err := httputil.DumpRequest(req, true)
if err == nil {
log.Debugf("Sending request %s", string(requestData))
} else {
log.Error(err)
}
resp, err := c.http.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
responseData, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Debugf("Received response %s", string(responseData))
} else {
log.Error(err)
}
return json2.DecodeClientResponse(resp.Body, result)
}

View File

@ -0,0 +1,88 @@
package client
import (
"fmt"
"github.com/docker/infrakit/pkg/rpc"
"github.com/docker/infrakit/pkg/spi"
"sync"
)
type handshakingClient struct {
client Client
iface spi.InterfaceSpec
// handshakeResult handles the tri-state outcome of handshake state:
// - handshake has not yet completed (nil)
// - handshake completed successfully (non-nil result, nil error)
// - handshake failed (non-nil result, non-nil error)
handshakeResult *handshakeResult
// lock guards handshakeResult
lock *sync.Mutex
}
type handshakeResult struct {
err error
}
type errVersionMismatch string
// Error implements error interface
func (e errVersionMismatch) Error() string {
return string(e)
}
// IsErrVersionMismatch return true if the error is from mismatched api versions.
func IsErrVersionMismatch(e error) bool {
_, is := e.(errVersionMismatch)
return is
}
func (c *handshakingClient) handshake() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.handshakeResult == nil {
req := rpc.ImplementsRequest{}
resp := rpc.ImplementsResponse{}
if err := c.client.Call("Handshake.Implements", req, &resp); err != nil {
return err
}
err := fmt.Errorf("Plugin does not support interface %v", c.iface)
if resp.APIs != nil {
for _, iface := range resp.APIs {
if iface.Name == c.iface.Name {
if iface.Version == c.iface.Version {
err = nil
break
} else {
err = errVersionMismatch(fmt.Sprintf(
"Plugin supports %s interface version %s, client requires %s",
iface.Name,
iface.Version,
c.iface.Version))
}
}
}
}
c.handshakeResult = &handshakeResult{err: err}
}
return c.handshakeResult.err
}
func (c *handshakingClient) Addr() string {
return c.client.Addr()
}
func (c *handshakingClient) Call(method string, arg interface{}, result interface{}) error {
if err := c.handshake(); err != nil {
return err
}
return c.client.Call(method, arg, result)
}
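
A sketch of a caller using this client; the socket path and the interface spec it expects are assumptions, while `Handshake.Implements` is the same RPC that the handshake above issues internally:

```go
package main

import (
	"fmt"

	"github.com/docker/infrakit/pkg/rpc"
	"github.com/docker/infrakit/pkg/rpc/client"
	"github.com/docker/infrakit/pkg/spi"
)

func main() {
	// New dials the plugin's unix socket and performs the version handshake.
	cl, err := client.New("/Users/me/.infrakit/plugins/instance-hyperkit",
		spi.InterfaceSpec{Name: "Instance", Version: "0.3.0"})
	if err != nil {
		fmt.Println("handshake failed:", err)
		return
	}

	// Ask the plugin which interfaces it implements.
	req := rpc.ImplementsRequest{}
	resp := rpc.ImplementsResponse{}
	if err := cl.Call("Handshake.Implements", req, &resp); err != nil {
		fmt.Println("call failed:", err)
		return
	}
	for _, api := range resp.APIs {
		fmt.Println("implements:", api.Name, api.Version)
	}
}
```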

Some files were not shown because too many files have changed in this diff.