runtime: cri-o annotations have been moved to podman

Let's switch to depending on podman, which also simplifies the indirect
dependency on kubernetes components and helps us avoid cri-o security
issues such as CVE-2022-1708.

Fixes: #4972
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
Peng Tao 2022-08-24 18:07:47 +08:00
parent 2b5dc2ad39
commit a06d819b24
410 changed files with 26290 additions and 7742 deletions
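For orientation, here is a minimal editorial sketch (not part of the patch; the spec contents are illustrative) of what the switch means for callers: the CRI-O-style annotation keys are now resolved through the vendored podman package, as the oci hunks below show.

```go
package main

import (
	"fmt"

	podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// A sandbox spec as a CRI runtime would hand it to the shim; the
	// annotation constants now come from podman instead of cri-o.
	spec := &specs.Spec{
		Annotations: map[string]string{
			podmanAnnotations.ContainerType: podmanAnnotations.ContainerTypeSandbox,
		},
	}
	// IsCRIOContainerManager (see the oci.go hunk below) now matches on
	// these podman constants when deciding whether a pod came from CRI-O.
	fmt.Println(spec.Annotations[podmanAnnotations.ContainerType])
}
```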


@ -4,56 +4,58 @@ go 1.14
require (
code.cloudfoundry.org/bytefmt v0.0.0-20211005130812-5bb3c17173e5
github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml v1.2.0
github.com/blang/semver v3.5.1+incompatible
github.com/blang/semver/v4 v4.0.0
github.com/containerd/cgroups v1.0.5-0.20220625035431-cf7417bca682
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.6.6
github.com/containerd/cri-containerd v1.11.1-0.20190125013620-4dd6735020f5
github.com/containerd/cri-containerd v1.19.0
github.com/containerd/fifo v1.0.0
github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2
github.com/containernetworking/plugins v1.1.1
github.com/containers/podman/v4 v4.2.0
github.com/coreos/go-systemd/v22 v22.3.2
github.com/cri-o/cri-o v1.0.0-rc2.0.20170928185954-3394b3b2d6af
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.4.9
github.com/frankban/quicktest v1.13.1 // indirect
github.com/fsnotify/fsnotify v1.5.4
github.com/go-ini/ini v1.28.2
github.com/go-openapi/errors v0.20.2
github.com/go-openapi/runtime v0.19.21
github.com/go-openapi/strfmt v0.21.1
github.com/go-openapi/swag v0.21.1
github.com/go-openapi/validate v0.22.0
github.com/godbus/dbus/v5 v5.0.6
github.com/godbus/dbus/v5 v5.1.0
github.com/gogo/protobuf v1.3.2
github.com/hashicorp/go-multierror v1.1.1
github.com/intel-go/cpuid v0.0.0-20210602155658-5747e5cec0d9
github.com/mdlayher/vsock v1.1.0
github.com/opencontainers/runc v1.1.2
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runc v1.1.3
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
github.com/opencontainers/selinux v1.10.1
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.1
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.30.0
github.com/prometheus/common v0.32.1
github.com/prometheus/procfs v0.7.3
github.com/rogpeppe/go-internal v1.8.1-0.20210923151022-86f73c517451 // indirect
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
github.com/urfave/cli v1.22.2
github.com/vishvananda/netlink v1.1.1-0.20210924202909-187053b97868
github.com/sirupsen/logrus v1.9.0
github.com/stretchr/testify v1.8.0
github.com/urfave/cli v1.22.4
github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965
go.opentelemetry.io/otel v1.3.0
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
go.opentelemetry.io/otel/sdk v1.3.0
go.opentelemetry.io/otel/trace v1.3.0
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
google.golang.org/grpc v1.43.0
golang.org/x/net v0.0.0-20220722155237-a158d28d115b
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
google.golang.org/grpc v1.47.0
k8s.io/apimachinery v0.22.5
k8s.io/cri-api v0.23.1
)

File diff suppressed because it is too large


@ -19,7 +19,7 @@ import (
"syscall"
ctrAnnotations "github.com/containerd/containerd/pkg/cri/annotations"
crioAnnotations "github.com/cri-o/cri-o/pkg/annotations"
podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/resource"
@ -46,17 +46,17 @@ var (
// CRIContainerTypeKeyList lists all the CRI keys that could define
// the container type from annotations in the config.json.
CRIContainerTypeKeyList = []string{ctrAnnotations.ContainerType, crioAnnotations.ContainerType, dockershimAnnotations.ContainerTypeLabelKey}
CRIContainerTypeKeyList = []string{ctrAnnotations.ContainerType, podmanAnnotations.ContainerType, dockershimAnnotations.ContainerTypeLabelKey}
// CRISandboxNameKeyList lists all the CRI keys that could define
// the sandbox ID (sandbox ID) from annotations in the config.json.
CRISandboxNameKeyList = []string{ctrAnnotations.SandboxID, crioAnnotations.SandboxID, dockershimAnnotations.SandboxIDLabelKey}
CRISandboxNameKeyList = []string{ctrAnnotations.SandboxID, podmanAnnotations.SandboxID, dockershimAnnotations.SandboxIDLabelKey}
// CRIContainerTypeList lists all the maps from CRI ContainerTypes annotations
// to a virtcontainers ContainerType.
CRIContainerTypeList = []annotationContainerType{
{crioAnnotations.ContainerTypeSandbox, vc.PodSandbox},
{crioAnnotations.ContainerTypeContainer, vc.PodContainer},
{podmanAnnotations.ContainerTypeSandbox, vc.PodSandbox},
{podmanAnnotations.ContainerTypeContainer, vc.PodContainer},
{ctrAnnotations.ContainerTypeSandbox, vc.PodSandbox},
{ctrAnnotations.ContainerTypeContainer, vc.PodContainer},
{dockershimAnnotations.ContainerTypeLabelSandbox, vc.PodSandbox},
@ -1047,8 +1047,8 @@ func getShmSize(c vc.ContainerConfig) (uint64, error) {
// IsCRIOContainerManager check if a Pod is created from CRI-O
func IsCRIOContainerManager(spec *specs.Spec) bool {
if val, ok := spec.Annotations[crioAnnotations.ContainerType]; ok {
if val == crioAnnotations.ContainerTypeSandbox || val == crioAnnotations.ContainerTypeContainer {
if val, ok := spec.Annotations[podmanAnnotations.ContainerType]; ok {
if val == podmanAnnotations.ContainerTypeSandbox || val == podmanAnnotations.ContainerTypeContainer {
return true
}
}


@ -16,7 +16,7 @@ import (
"testing"
ctrAnnotations "github.com/containerd/containerd/pkg/cri/annotations"
crioAnnotations "github.com/cri-o/cri-o/pkg/annotations"
podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
"golang.org/x/sys/unix"
@ -224,22 +224,22 @@ func TestContainerType(t *testing.T) {
},
{
description: "crio unexpected annotation, expect error",
annotationKey: crioAnnotations.ContainerType,
annotationKey: podmanAnnotations.ContainerType,
annotationValue: "foo",
expectedType: vc.UnknownContainerType,
expectedErr: true,
},
{
description: "crio sandbox",
annotationKey: crioAnnotations.ContainerType,
annotationValue: string(crioAnnotations.ContainerTypeSandbox),
annotationKey: podmanAnnotations.ContainerType,
annotationValue: string(podmanAnnotations.ContainerTypeSandbox),
expectedType: vc.PodSandbox,
expectedErr: false,
},
{
description: "crio container",
annotationKey: crioAnnotations.ContainerType,
annotationValue: string(crioAnnotations.ContainerTypeContainer),
annotationKey: podmanAnnotations.ContainerType,
annotationValue: string(podmanAnnotations.ContainerTypeContainer),
expectedType: vc.PodContainer,
expectedErr: false,
},
@ -287,7 +287,7 @@ func TestSandboxIDSuccessful(t *testing.T) {
assert := assert.New(t)
ociSpec.Annotations = map[string]string{
crioAnnotations.SandboxID: testSandboxID,
podmanAnnotations.SandboxID: testSandboxID,
}
sandboxID, err := SandboxID(ociSpec)
@ -883,15 +883,15 @@ func TestIsCRIOContainerManager(t *testing.T) {
result bool
}{
{
annotations: map[string]string{crioAnnotations.ContainerType: "abc"},
annotations: map[string]string{podmanAnnotations.ContainerType: "abc"},
result: false,
},
{
annotations: map[string]string{crioAnnotations.ContainerType: crioAnnotations.ContainerTypeSandbox},
annotations: map[string]string{podmanAnnotations.ContainerType: podmanAnnotations.ContainerTypeSandbox},
result: true,
},
{
annotations: map[string]string{crioAnnotations.ContainerType: crioAnnotations.ContainerTypeContainer},
annotations: map[string]string{podmanAnnotations.ContainerType: podmanAnnotations.ContainerTypeContainer},
result: true,
},
}


@ -1,5 +1,2 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
/toml.test
/toml-test


@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test


@ -1,3 +0,0 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)


@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master


@ -1,46 +1,26 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
reflection interface similar to Go's standard library `json` and `xml` packages.
Spec: https://github.com/toml-lang/toml
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://godoc.org/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
Installation:
This library requires Go 1.13 or newer; add it to your go.mod with:
```bash
go get github.com/BurntSushi/toml
```
% go get github.com/BurntSushi/toml@latest
Try the toml validator:
It also comes with a TOML validator CLI tool:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
% tomlv some-toml-file.toml
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
For the simplest example, consider some TOML file as just a list of keys and
values:
```toml
Age = 25
@ -50,29 +30,23 @@ Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
Which can be decoded with:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
_, err := toml.Decode(tomlData, &conf)
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
You can also use struct tags if your struct field name doesn't map to a TOML key
value directly:
```toml
some_key_NAME = "wat"
@ -80,139 +54,67 @@ some_key_NAME = "wat"
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Beware that like other decoders **only exported fields** are considered when
encoding and decoding; private fields are silently ignored.
Here's an example that automatically parses duration strings into
`time.Duration` values:
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
Here's an example that automatically parses values in a `mail.Address`:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
```
And the corresponding Go types are:
Can be decoded with:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
// Create address type which satisfies the encoding.TextUnmarshaler interface.
type address struct {
*mail.Address
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
func (a *address) UnmarshalText(text []byte) error {
var err error
a.Address, err = mail.ParseAddress(string(text))
return err
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
// Decode it.
func decode() {
blob := `
contacts = [
"Donald Duck <donald@duckburg.com>",
"Scrooge McDuck <scrooge@duckburg.com>",
]
`
type server struct {
IP string
DC string
}
var contacts struct {
Contacts []address
}
type clients struct {
Data [][]interface{}
Hosts []string
_, err := toml.Decode(blob, &contacts)
if err != nil {
log.Fatal(err)
}
for _, c := range contacts.Contacts {
fmt.Printf("%#v\n", c.Address)
}
// Output:
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
A working example of the above can be found in `_examples/example.{go,toml}`.
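As a rough editorial illustration (not from the upstream README; the `server` type and keys are made up), a type can instead implement `UnmarshalTOML` and receive the raw decoded value, which for a table is a `map[string]interface{}`:

```go
type server struct {
	Host string
	Port int64
}

// UnmarshalTOML gives the type full control over how the raw value is mapped.
func (s *server) UnmarshalTOML(v interface{}) error {
	m, ok := v.(map[string]interface{})
	if !ok {
		return fmt.Errorf("server: expected a TOML table, got %T", v)
	}
	if h, ok := m["host"].(string); ok {
		s.Host = h
	}
	if p, ok := m["port"].(int64); ok { // TOML integers decode to int64
		s.Port = p
	}
	return nil
}
```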
### More complex usage
See the [`_example/`](/_example) directory for a more complex example.


@ -1,54 +1,171 @@
package toml
import (
"bytes"
"encoding"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"strconv"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
// This type can be used for any value, which will cause decoding to be delayed.
// You can use the PrimitiveDecode() function to "manually" decode these values.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
// NOTE: The underlying representation of a `Primitive` value is subject to
// change. Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
// NOTE: Primitive values are still parsed, so using them will only avoid the
// overhead of reflection. They can be useful when you don't know the exact type
// of TOML data until runtime.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
// The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
)
// Decoder decodes TOML data.
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
// TOML tables correspond to Go structs or maps (dealer's choice they can be
// used interchangeably).
//
// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
// time.Duration types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
//
// An exception to the above rules is if a type implements the TextUnmarshaler
// interface, in which case any primitive TOML value (floats, strings, integers,
// booleans, datetimes) will be converted to a []byte and given to the value's
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
// that don't match the key name exactly (see the example). A case insensitive
// match to struct names will be tried if an exact match can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there may
// exist TOML values that cannot be placed into your representation, and there
// may be parts of your representation that do not correspond to TOML values.
// This loose mapping can be made stricter by using the IsDefined and/or
// Undecoded methods on the MetaData returned.
//
// This decoder does not handle cyclic types. Decode will not terminate if a
// cyclic type is passed.
type Decoder struct {
r io.Reader
}
// NewDecoder creates a new Decoder.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
var (
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
)
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
s := "%q"
if reflect.TypeOf(v) == nil {
s = "%v"
}
return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
// Check if this is a supported type: struct, map, interface{}, or something
// that implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
}
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
p, err := parse(string(data))
if err != nil {
return MetaData{}, err
}
md := MetaData{
mapping: p.mapping,
keyInfo: p.keyInfo,
keys: p.ordered,
decoded: make(map[string]struct{}, len(p.ordered)),
context: nil,
data: data,
}
return md, md.unify(p.mapping, rv)
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
@ -68,90 +185,15 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == primitiveType {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
@ -163,36 +205,24 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// TODO:
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
// array. In particular, the unmarshaler should only be applied to primitive
// TOML values. But at this point, it will be applied to all kinds of values
// and produce an incorrect error whenever those values are hashes or arrays
// (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
@ -218,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
return md.e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@ -237,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
return md.e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
@ -259,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
err := md.unify(datum, subv)
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
@ -277,28 +305,43 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
}
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
return md.badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
err := md.unify(v, indirect(rvval))
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rvkey := indirect(reflect.New(rv.Type().Key()))
switch keyType {
case reflect.Interface:
rvkey.Set(reflect.ValueOf(k))
case reflect.String:
rvkey.SetString(k)
}
rv.SetMapIndex(rvkey, rvval)
}
return nil
@ -310,12 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
if l := datav.Len(); l != rv.Len() {
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@ -326,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
@ -337,37 +378,45 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
l := data.Len()
for i := 0; i < l; i++ {
err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
if err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
} else {
return md.badtype("string", data)
}
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
return md.badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
rvk := rv.Kind()
if num, ok := data.(float64); ok {
switch rv.Kind() {
switch rvk {
case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
fallthrough
case reflect.Float64:
rv.SetFloat(num)
@ -376,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}
return nil
}
return badtype("float", data)
if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
}
return md.badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
// (as nanosecond) if this is not a string.
if s, ok := data.(string); ok {
dur, err := time.ParseDuration(s)
if err != nil {
return md.parseErr(errParseDuration{s})
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
rv.SetInt(int64(dur))
return nil
}
return nil
}
return badtype("integer", data)
num, ok := data.(int64)
if !ok {
return md.badtype("integer", data)
}
rvk := rv.Kind()
switch {
case rvk >= reflect.Int && rvk <= reflect.Int64:
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetInt(num)
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
unum := uint64(num)
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetUint(unum)
default:
panic("unreachable")
}
return nil
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@ -431,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
return md.badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
@ -439,10 +494,16 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
case Marshaler:
text, err := sdata.MarshalTOML()
if err != nil {
return err
}
s = string(text)
case encoding.TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@ -459,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
@ -467,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
return nil
}
func (md *MetaData) badtype(dst string, data interface{}) error {
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
}
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
err: err,
input: string(md.data),
}
}
func (md *MetaData) e(format string, args ...interface{}) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
p := md.keyInfo[md.context.String()].pos
if p.Line > 0 {
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
}
}
return fmt.Errorf(f+format, args...)
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
// Pointers are followed until the value is not a pointer. New values are
// allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of interest
// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
pvi := pv.Interface()
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
return pv
}
if _, ok := pvi.(Unmarshaler); ok {
return pv
}
}
@ -498,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
rvi := rv.Interface()
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
return true
}
if _, ok := rvi.(Unmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
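To tie the decoder changes above together, a short editorial usage sketch (not part of the vendored sources; the config fields and TOML values are illustrative) of the reworked API: NewDecoder/Decode, plus the new time.Duration handling where string values go through time.ParseDuration:

```go
package main

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name    string
	Timeout time.Duration // "30s" below is parsed via time.ParseDuration
}

func main() {
	blob := `
name = "example"
timeout = "30s"
`
	var c config
	if _, err := toml.NewDecoder(strings.NewReader(blob)).Decode(&c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Timeout) // example 30s
}
```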


@ -0,0 +1,19 @@
//go:build go1.16
// +build go1.16
package toml
import (
"io/fs"
)
// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}


@ -0,0 +1,21 @@
package toml
import (
"encoding"
"io"
)
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }


@ -1,27 +1,13 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
Package toml implements decoding and encoding of TOML files.
The specification implemented: https://github.com/toml-lang/toml
This package supports TOML v1.0.0, as listed on https://toml.io
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
There is also support for delaying decoding with the Primitive type, and
querying the set of keys in a TOML document with the MetaData type.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
print the type of each key.
*/
package toml


@ -2,57 +2,127 @@ package toml
import (
"bufio"
"encoding"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/BurntSushi/toml/internal"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
errArrayNilElement = errors.New("toml: cannot encode array with nil element")
errNonString = errors.New("toml: cannot encode a map with non-string key type")
errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
var dblQuotedReplacer = strings.NewReplacer(
"\"", "\\\"",
"\\", "\\\\",
"\x00", `\u0000`,
"\x01", `\u0001`,
"\x02", `\u0002`,
"\x03", `\u0003`,
"\x04", `\u0004`,
"\x05", `\u0005`,
"\x06", `\u0006`,
"\x07", `\u0007`,
"\b", `\b`,
"\t", `\t`,
"\n", `\n`,
"\x0b", `\u000b`,
"\f", `\f`,
"\r", `\r`,
"\x0e", `\u000e`,
"\x0f", `\u000f`,
"\x10", `\u0010`,
"\x11", `\u0011`,
"\x12", `\u0012`,
"\x13", `\u0013`,
"\x14", `\u0014`,
"\x15", `\u0015`,
"\x16", `\u0016`,
"\x17", `\u0017`,
"\x18", `\u0018`,
"\x19", `\u0019`,
"\x1a", `\u001a`,
"\x1b", `\u001b`,
"\x1c", `\u001c`,
"\x1d", `\u001d`,
"\x1e", `\u001e`,
"\x1f", `\u001f`,
"\x7f", `\u007f`,
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
var (
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
)
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
// Marshaler is the interface implemented by types that can marshal themselves
// into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
// Encoder encodes a Go to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions.
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
// encoding the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
// something like base64 since TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// The toml struct tag can be used to provide the key name; if omitted the
// struct field name will be used. If the "omitempty" option is present the
// following value will be skipped:
//
// - arrays, slices, maps, and string with len of 0
// - struct with all zero values
// - bool false
//
// If omitzero is given all int and float types with a value of 0 will be
// skipped.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
// String to use for a single indentation level; default is two spaces.
Indent string
w *bufio.Writer
hasWritten bool // written any output to w yet?
}
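A brief editorial sketch (not part of the vendored encode.go; the settings type is made up) of the omitempty/omitzero tag options and the Indent field described in the doc comment above:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type settings struct {
	Name  string   `toml:"name"`
	Tags  []string `toml:"tags,omitempty"` // skipped while the slice is empty
	Retry int      `toml:"retry,omitzero"` // skipped while the value is 0
}

func main() {
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	enc.Indent = "  " // two spaces, same as the default
	if err := enc.Encode(settings{Name: "demo"}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // only name = "demo" is emitted
}
```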
// NewEncoder create a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
@ -60,29 +130,10 @@ func NewEncoder(w io.Writer) *Encoder {
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
// Encode writes a TOML representation of the Go value to the Encoder's writer.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@ -106,13 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
// If we can marshal the type to text, then we use that. This prevents the
// encoder for handling these types as generic structs (or whatever the
// underlying type of a TextMarshaler is).
switch {
case isMarshaler(rv):
enc.writeKeyValue(key, rv, false)
return
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
return
}
@ -123,12 +176,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
enc.writeKeyValue(key, rv, false)
}
case reflect.Interface:
if rv.IsNil() {
@ -148,55 +201,114 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
// eElement encodes any value that can be an array element.
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
format := time.RFC3339Nano
switch v.Location() {
case internal.LocalDatetime:
format = "2006-01-02T15:04:05.999999999"
case internal.LocalDate:
format = "2006-01-02"
case internal.LocalTime:
format = "15:04:05.999999999"
}
switch v.Location() {
default:
enc.wf(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
enc.wf(v.In(time.UTC).Format(format))
}
return
case Marshaler:
s, err := v.MarshalTOML()
if err != nil {
encPanic(err)
}
if s == nil {
encPanic(errors.New("MarshalTOML returned nil and no error"))
}
enc.w.Write(s)
return
case encoding.TextMarshaler:
s, err := v.MarshalText()
if err != nil {
encPanic(err)
}
if s == nil {
encPanic(errors.New("MarshalText returned nil and no error"))
}
enc.writeQuoted(string(s))
return
case time.Duration:
enc.writeQuoted(v.String())
return
case json.Number:
n, _ := rv.Interface().(json.Number)
if n == "" { /// Useful zero value.
enc.w.WriteByte('0')
return
} else if v, err := n.Int64(); err == nil {
enc.eElement(reflect.ValueOf(v))
return
} else if v, err := n.Float64(); err == nil {
enc.eElement(reflect.ValueOf(v))
return
}
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
case reflect.Ptr:
enc.eElement(rv.Elem())
return
case reflect.String:
enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Struct:
enc.eStruct(nil, rv, true)
case reflect.Map:
enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
// By the TOML spec, all floats must have a decimal with at least one number on
// either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
@ -205,14 +317,14 @@ func floatAddDecimal(fstr string) string {
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
elem := eindirect(rv.Index(i))
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
@ -226,44 +338,43 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
trv := eindirect(rv.Index(i))
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
enc.eMapOrStruct(key, trv)
enc.eMapOrStruct(key, trv, false)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.wf("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
enc.eMapOrStruct(key, rv)
enc.eMapOrStruct(key, rv, false)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
enc.eMap(key, rv, inline)
case reflect.Struct:
enc.eStruct(key, rv)
enc.eStruct(key, rv, inline)
default:
// Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
@ -274,118 +385,179 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
if isNil(val) {
continue
}
enc.encode(key.add(mapKey), mrv)
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
}
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
if inline {
enc.wf("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
enc.wf("}")
}
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
}
return t
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
//
// Fields is a [][]int: for fieldsDirect this always has one entry (the
// struct index). For fieldsSub it contains two entries: the parent field
// index from tv, and the field indexes for the fields of the sub.
var (
rt = rv.Type()
fieldsDirect, fieldsSub [][]int
addFields func(rt reflect.Type, rv reflect.Value, start []int)
)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
opts := getOptions(f.Tag)
if opts.skip {
continue
}
frv := eindirect(rv.Field(i))
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
if isEmbed {
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
addFields(frv.Type(), frv, append(start, f.Index...))
continue
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
// Copy so it works correctly on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
if is32Bit {
copyStart := make([]int, len(start))
copy(copyStart, start)
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
opts := getOptions(fieldType.Tag)
if opts.skip {
continue
}
keyName := sft.Name
keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
if opts.omitempty && isEmpty(fieldVal) {
continue
}
if opts.omitzero && isZero(sf) {
if opts.omitzero && isZero(fieldVal) {
continue
}
enc.encode(key.add(keyName), sf)
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(keyName), fieldVal)
}
}
}
if inline {
enc.wf("{")
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
if inline {
enc.wf("}")
}
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
// used to determine whether the types of array elements are mixed (which is
// forbidden). If the Go value is nil, then it is illegal for it to be an array
// element, and valueIsNil is returned as true.
// Returns the TOML type of a Go value. The type may be `nil`, which means
// no concrete TOML type could be found.
// tomlTypeOfGo returns the TOML type name of the Go value's type.
//
// It is used to determine whether the types of array elements are mixed (which
// is forbidden). If the Go value is nil, then it is illegal for it to be an
// array element, and valueIsNil is returned as true.
//
// The type may be `nil`, which means no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
if rv.Kind() == reflect.Struct {
if rv.Type() == timeType {
return tomlDatetime
}
if isMarshaler(rv) {
return tomlString
}
return tomlHash
}
if isMarshaler(rv) {
return tomlString
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
@ -397,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
if isTableArray(rv) {
return tomlArrayHash
}
return tomlArray
@ -407,54 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("unreachable")
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slize). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
func isMarshaler(rv reflect.Value) bool {
return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
}
// isTableArray reports if all entries in the array or slice are a table.
func isTableArray(arr reflect.Value) bool {
if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
return false
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
ret := true
for i := 0; i < arr.Len(); i++ {
tt := tomlTypeOfGo(eindirect(arr.Index(i)))
// Don't allow nil.
if tt == nil {
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
if ret && !typeEqual(tomlHash, tt) {
ret = false
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
return ret
}
type tagOptions struct {
@ -499,6 +652,8 @@ func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Struct:
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
case reflect.Bool:
return !rv.Bool()
}
@ -511,18 +666,32 @@ func (enc *Encoder) newline() {
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
// Write a key/value pair:
//
// key = <any value>
//
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
// ┌────────────────────┐
// │ ┌───┐ ┌─────┐│
// v v v v vv
// key = {k = v, k2 = v2}
//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
if !inline {
enc.newline()
}
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
}
enc.hasWritten = true
@ -536,13 +705,25 @@ func encPanic(err error) {
panic(tomlEncodeError{err})
}
// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
if isMarshaler(v) {
return v
}
if v.CanAddr() { /// Special case for marshalers; see #358.
if pv := v.Addr(); isMarshaler(pv) {
return pv
}
}
return v
}
if v.IsNil() {
return v
}
return eindirect(v.Elem())
}
func isNil(rv reflect.Value) bool {
@ -553,16 +734,3 @@ func isNil(rv reflect.Value) bool {
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}
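To make the encoder changes above easier to follow, here is a minimal, illustrative sketch of the user-facing behaviour of the struct tag options handled in eStruct (omitempty, omitzero, and skipping of unexported fields); the server type and its fields are invented for this example:

package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string   `toml:"host"`
	Port int      `toml:"port,omitzero"`  // dropped when zero
	Tags []string `toml:"tags,omitempty"` // dropped when empty
	note string   // unexported: skipped by addFields
}

func main() {
	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(server{Host: "example.com"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // expected output: host = "example.com"
}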

@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

276 src/runtime/vendor/github.com/BurntSushi/toml/error.go generated vendored Normal file
@ -0,0 +1,276 @@
package toml
import (
"fmt"
"strings"
)
// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example: invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
// At line 4, column 2-7:
//
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
//
// Furthermore, ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
//
// toml: error: newlines not allowed within inline tables
//
// At line 1, column 18:
//
// 1 | x = [{ key = 42 #
// ^
//
// Error help:
//
// Inline tables must always be on a single line:
//
// table = {key = 42, second = 43}
//
// It is invalid to split them over multiple lines like so:
//
// # INVALID
// table = {
// key = 42,
// second = 43
// }
//
// Use regular tables for this:
//
// [table]
// key = 42
// second = 43
type ParseError struct {
Message string // Short technical message.
Usage string // Longer message with usage guidance; may be blank.
Position Position // Position of the error.
LastKey string // Last parsed key, may be blank.
Line int // Line the error occurred on. Deprecated: use Position.
err error
input string
}
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Length in bytes.
}
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
}
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithPosition() string {
if pe.input == "" { // Should never happen, but just in case.
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
}
if pe.Position.Line > 1 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
}
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
return b.String()
}
// ErrorWithUsage() returns the error with detailed location context and usage
// guidance.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
for i := range lines {
if lines[i] != "" {
lines[i] = " " + lines[i]
}
}
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
}
return m
}
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
errLexInvalidNum struct{ v string }
errLexInvalidDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
errParseRange struct {
i interface{} // int or float
size string // "int64", "uint16", etc.
}
errParseDuration struct{ d string }
)
func (e errLexControl) Error() string {
return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
}
func (e errLexControl) Usage() string { return "" }
func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
func (e errLexInvalidNum) Usage() string { return "" }
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
func (e errLexInvalidDate) Usage() string { return "" }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string { return usageIntOverflow }
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
func (e errParseDuration) Usage() string { return usageDuration }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
The following escape sequences are supported:
\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
To prevent a '\' from being recognized as an escape character, use either:
- a ' or '''-delimited string; escape characters aren't processed in them; or
- write two backslashes to get a single backslash: '\\'.
If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
instead of '\' will usually also work: "C:/Users/martin".
`
const usageInlineNewline = `
Inline tables must always be on a single line:
table = {key = 42, second = 43}
It is invalid to split them over multiple lines like so:
# INVALID
table = {
key = 42,
second = 43
}
Use regular tables for this:
[table]
key = 42
second = 43
`
const usageStringNewline = `
Strings must always be on a single line, and cannot span more than one line:
# INVALID
string = "Hello,
world!"
Instead use """ or ''' to split strings over multiple lines:
string = """Hello,
world!"""
`
const usageIntOverflow = `
This number is too large; this may be an error in the TOML, but it can also be a
bug in the program that uses an integer type that is too small.
The maximum and minimum values are:
    size   lowest              highest
    int8   -128                127
    int16  -32,768             32,767
    int32  -2,147,483,648      2,147,483,647
    int64  -9.2 × 10¹⁸         9.2 × 10¹⁸
    uint8  0                   255
    uint16 0                   65,535
    uint32 0                   4,294,967,295
    uint64 0                   1.8 × 10¹⁹
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`
const usageDuration = `
A duration must be given as "number<unit>", without any spaces. Valid units are:
ns nanoseconds (billionth of a second)
us, µs microseconds (millionth of a second)
ms milliseconds (thousandths of a second)
s seconds
m minutes
h hours
You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
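A short, hedged sketch of how a caller might surface these richer errors; it assumes the ParseError value produced by the parser (see parse() further down in this diff) is returned unwrapped from Decode:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// This integer does not fit in int64, so the parser reports errParseRange.
	_, err := toml.Decode("x = 99999999999999999999", &v)
	if err == nil {
		return
	}
	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Message, offending line with a caret, and the usage help above.
		fmt.Fprint(os.Stderr, perr.ErrorWithUsage())
		return
	}
	fmt.Fprintln(os.Stderr, err)
}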

3 src/runtime/vendor/github.com/BurntSushi/toml/go.mod generated vendored Normal file
@ -0,0 +1,3 @@
module github.com/BurntSushi/toml
go 1.16

@ -0,0 +1,36 @@
package internal
import "time"
// Timezones used for local datetime, date, and time TOML types.
//
// The exact way times and dates without a timezone should be interpreted is not
// well-defined in the TOML specification and left to the implementation. These
// defaults to current local timezone offset of the computer, but this can be
// changed by changing these variables before decoding.
//
// TODO:
// Ideally we'd like to offer people the ability to configure the used timezone
// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
// tricky: the reason we use three different variables for this is to support
// round-tripping; without these specific TZ names we wouldn't know which
// format to use.
//
// There isn't a good way to encode this right now though, and passing this sort
// of information also ties in to various related issues such as string format
// encoding, encoding of comments, etc.
//
// So, for the time being, just put this in internal until we can write a good
// comprehensive API for doing all of this.
//
// The reason they're exported is that they're referred to from e.g.
// internal/tag.
//
// Note that this behaviour is valid according to the TOML spec as the exact
// behaviour is left up to implementations.
var (
localOffset = func() int { _, o := time.Now().Zone(); return o }()
LocalDatetime = time.FixedZone("datetime-local", localOffset)
LocalDate = time.FixedZone("date-local", localOffset)
LocalTime = time.FixedZone("time-local", localOffset)
)
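As a rough illustration of what these fixed zones mean for callers (assuming the decoder hands back the parsed time.Time unchanged):

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]interface{}
	if _, err := toml.Decode("d = 2021-05-01", &m); err != nil {
		panic(err)
	}
	t := m["d"].(time.Time)
	// The zone name records which TOML form was parsed ("date-local" here),
	// which is what lets the value round-trip back to the same syntax.
	fmt.Println(t.Location())
}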

File diff suppressed because it is too large.

@ -1,33 +1,40 @@
package toml
import "strings"
import (
"strings"
)
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
// MetaData allows access to meta information about TOML data that's not
// accessible otherwise.
//
// It allows checking if a key is defined in the TOML data, whether any keys
// were undecoded, and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
keyInfo map[string]keyInfo
mapping map[string]interface{}
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchially. e.g.,
// IsDefined reports if the key exists in the TOML data.
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
// The key should be specified hierarchically, for example to access the TOML
// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
// Returns false for an empty key.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
var (
hash map[string]interface{}
ok bool
hashOrVal interface{} = md.mapping
)
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
@ -41,58 +48,20 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
if ki, ok := md.keyInfo[Key(key).String()]; ok {
return ki.tomlType.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
@ -113,9 +82,40 @@ func (md *MetaData) Keys() []Key {
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
if _, ok := md.decoded[key.String()]; !ok {
undecoded = append(undecoded, key)
}
}
return undecoded
}
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string
func (k Key) String() string {
ss := make([]string, len(k))
for i := range k {
ss[i] = k.maybeQuoted(i)
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
for _, c := range k[i] {
if !isBareKeyChar(c) {
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
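A small sketch of how the reworked MetaData can be queried; the document and struct below are invented for illustration, and the printed values are what the API shown here suggests:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Owner struct {
			Name string `toml:"name"`
		} `toml:"owner"`
	}
	doc := "[owner]\nname = \"Anonymous\"\ndob = 1979-05-27"
	md, err := toml.Decode(doc, &conf)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("owner", "name")) // true
	fmt.Println(md.Type("owner", "dob"))       // "Datetime"
	fmt.Println(md.Undecoded())                // [owner.dob], not mapped onto conf
}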

@ -5,54 +5,69 @@ import (
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/BurntSushi/toml/internal"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
lx *lexer
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
// A list of keys in the order that they appear in the TOML data.
ordered []Key
ordered []Key // List of keys in the order that they appear in the TOML data.
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
mapping map[string]interface{} // Map keyname → key value.
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
type keyInfo struct {
pos Position
tomlType tomlType
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
if pErr, ok := r.(ParseError); ok {
pErr.input = data
err = pErr
return
}
panic(r)
}
}()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
// which mangles stuff.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
data = data[2:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
// file (second byte in surrogate pair being NULL). Again, do this here to
// avoid having to deal with UTF-8/16 stuff in the lexer.
ex := 6
if len(data) < 6 {
ex = len(data)
}
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
}
p = &parser{
keyInfo: make(map[string]keyInfo),
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
implicits: make(map[string]struct{}),
}
for {
item := p.next()
@ -65,20 +80,57 @@ func parse(data string) (p *parser, err error) {
return p, nil
}
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
err: err,
Position: it.pos,
Line: it.pos.Len,
LastKey: p.current(),
})
}
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Line: it.pos.Len,
LastKey: p.current(),
})
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
Line: p.pos.Line,
LastKey: p.current(),
})
}
func (p *parser) next() item {
it := p.lx.nextItem()
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
if it.typ == itemError {
p.panicf("%s", it.val)
if it.err != nil {
panic(ParseError{
Position: it.pos,
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
}
p.panicItemf(it, "%s", it.val)
}
return it
}
func (p *parser) nextPos() item {
it := p.next()
p.pos = it.pos
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@ -97,44 +149,60 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
case itemCommentStart: // # ..
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
case itemTableStart: // [ .. ]
name := p.nextPos()
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(name))
}
p.assertEqual(itemTableEnd, kg.typ)
p.assertEqual(itemTableEnd, name.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.addContext(key, false)
p.setType("", tomlHash, item.pos)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
case itemArrayTableStart: // [[ .. ]]
name := p.nextPos()
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(name))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.assertEqual(itemArrayTableEnd, name.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.addContext(key, true)
p.setType("", tomlArrayHash, item.pos)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
case itemKeyStart: // key = ..
outerContext := p.context
/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
/// The current key is the last part.
p.currentKey = key[len(key)-1]
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set value.
vItem := p.next()
val, typ := p.value(vItem, false)
p.set(p.currentKey, val, typ, vItem.pos)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
@ -148,180 +216,261 @@ func (p *parser) keyString(it item) string {
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
s, _ := p.value(it, false)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
panic("unreachable")
}
var datetimeRepl = strings.NewReplacer(
"z", "Z",
"t", "T",
" ", "T")
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemInteger:
return p.valueInteger(it)
case itemFloat:
return p.valueFloat(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
default:
p.bug("Expected boolean value, but got '%s'.", it.val)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
return p.valueDatetime(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
return p.valueArray(it)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
return p.valueInlineTable(it, parentIsArray)
default:
p.bug("Unexpected value type: %s", it.typ)
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
if numHasLeadingZero(it.val) {
p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
}
num, err := strconv.ParseInt(it.val, 0, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicErr(it, errParseRange{i: it.val, size: "int64"})
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
}
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
}
}
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
val = "nan"
}
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicErr(it, errParseRange{i: it.val, size: "float64"})
} else {
p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
}
var dtTypes = []struct {
fmt string
zone *time.Location
}{
{time.RFC3339Nano, time.Local},
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
{"2006-01-02", internal.LocalDate},
{"15:04:05.999999999", internal.LocalTime},
}
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
ok bool
err error
)
for _, dt := range dtTypes {
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
}
func (p *parser) valueArray(it item) (interface{}, tomlType) {
p.setType(p.currentKey, tomlArray, it.pos)
var (
types []tomlType
// Initialize to a non-nil empty slice. This makes it consistent with
// how S = [] decodes into a non-nil slice inside something like struct
// { S []string }. See #338
array = []interface{}{}
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it, true)
array = append(array, val)
types = append(types, typ)
// XXX: types isn't used here, we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
}
return array, tomlArray
}
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
prevContext := p.context
p.currentKey = ""
p.addImplicit(p.context)
p.addContext(p.context, parentIsArray)
/// Loop over all table key/value pairs.
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
/// Read all key parts.
k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
p.currentKey = key[len(key)-1]
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set the value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ, it.pos)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[p.currentKey] = val
/// Restore context.
p.context = prevContext
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
return true
}
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
return true
}
return false
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
switch s {
case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
return true
}
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
// isHexadecimal is a superset of all the permissible characters
// surrounding an underscore.
accept = isHexadecimal(r)
}
return accept
}
@ -338,13 +487,12 @@ func numPeriodsOK(s string) bool {
return !period
}
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
// Set the current context of the parser, where the context is either a hash or
// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
func (p *parser) addContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
@ -383,7 +531,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
hashContext[k] = make([]map[string]interface{}, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
@ -391,8 +539,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
@ -400,15 +547,23 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1])
}
// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
p.setValue(key, val)
p.setType(key, typ, pos)
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
var (
tmpHash interface{}
ok bool
hash = p.mapping
keyContext Key
)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
@ -422,24 +577,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
p.panicf("Key '%s' has already been defined.", keyContext)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
// Normally redefining keys isn't allowed, but the key could have been
// defined implicitly and it's allowed to be redefined concretely. (See
// the `valid/implicit-and-explicit-after.toml` in toml-test)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isArray(keyContext) {
p.removeImplicit(keyContext)
hash[key] = value
return
}
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
@ -449,40 +606,39 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
// setType sets the type of a particular value at a given key. It should be
// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
func (p *parser) setType(key string, typ tomlType, pos Position) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
keyContext = append(keyContext, p.context...)
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
// Special case to make empty keys ("" = 1) work.
// Without it, it will set "" rather than `""`.
// TODO: why is this needed? And why is this only needed here?
if len(keyContext) == 0 {
keyContext = Key{""}
}
p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
func (p *parser) addImplicitContext(key Key) {
p.addImplicit(key)
p.addContext(key, false)
}
// current returns the full key name of the current context.
@ -497,24 +653,62 @@ func (p *parser) current() string {
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
if len(s) > 0 && s[0] == '\n' {
return s[1:]
}
if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
return s[2:]
}
return s
}
// Remove newlines inside triple-quoted strings if a line ends with "\".
func (p *parser) stripEscapedNewlines(s string) string {
split := strings.Split(s, "\n")
if len(split) < 1 {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
escNL := false // Keep track of whether the last non-blank line was escaped.
for i, line := range split {
line = strings.TrimRight(line, " \t\r")
if len(line) == 0 || line[len(line)-1] != '\\' {
split[i] = strings.TrimRight(split[i], "\r")
if !escNL && i != len(split)-1 {
split[i] += "\n"
}
continue
}
escBS := true
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
escBS = !escBS
}
if escNL {
line = strings.TrimLeft(line, " \t\r")
}
escNL = !escBS
if escBS {
split[i] += "\n"
continue
}
if i == len(split)-1 {
p.panicf("invalid escape: '\\ '")
}
split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
}
}
return strings.Join(esc, "")
return strings.Join(split, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
func (p *parser) replaceEscapes(it item, str string) string {
replaced := make([]rune, 0, len(str))
s := []byte(str)
r := 0
for r < len(s) {
@ -532,7 +726,8 @@ func (p *parser) replaceEscapes(str string) string {
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case ' ', '\t':
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
@ -558,14 +753,14 @@ func (p *parser) replaceEscapes(str string) string {
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
replaced = append(replaced, escaped)
r += 9
}
@ -573,20 +768,14 @@ func (p *parser) replaceEscapes(str string) string {
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}
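To make the dotted-key handling in topLevel concrete, a minimal sketch (using the package-level Unmarshal helper, which is assumed to route through this parser):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]interface{}
	// "a" and "b" are created as implicit tables; "c" is the concrete key.
	if err := toml.Unmarshal([]byte("a.b.c = 1"), &m); err != nil {
		panic(err)
	}
	b := m["a"].(map[string]interface{})["b"].(map[string]interface{})
	fmt.Println(b["c"]) // 1
}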

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
var count map[reflect.Type]int
var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}

@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
func typeIsTable(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -143,6 +144,11 @@ func (f *win32File) Close() error {
return nil
}
// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
return f.closing.isSet()
}
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {

@ -1,9 +1,8 @@
module github.com/Microsoft/go-winio
go 1.12
go 1.13
require (
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

@ -1,14 +1,11 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package winio
@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}
func (conn *HvsockConn) IsClosed() bool {
return conn.sock.IsClosed()
}
func (conn *HvsockConn) shutdown(how int) error {
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
if conn.IsClosed() {
return ErrFileClosed
}
err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
return os.NewSyscallError("shutdown", err)
}
return nil
}
// CloseRead shuts down the read end of the socket.
// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
return nil
}
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {

@ -14,8 +14,6 @@ import (
"encoding/binary"
"fmt"
"strconv"
"golang.org/x/sys/windows"
)
// Variant specifies which GUID variant (or "type") of the GUID. It determines
@ -41,13 +39,6 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte

@ -0,0 +1,15 @@
// +build !windows
package guid
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type because windows.GUID is only available
// for builds targeting `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}

@ -0,0 +1,10 @@
package guid
import "golang.org/x/sys/windows"
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID

@ -3,11 +3,10 @@
package security
import (
"fmt"
"os"
"syscall"
"unsafe"
"github.com/pkg/errors"
)
type (
@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
// Stat (to determine if `name` is a directory).
s, err := os.Stat(name)
if err != nil {
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
}
// Get a handle to the file/directory. Must defer Close on success.
@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
sd := uintptr(0)
origDACL := uintptr(0)
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {
// And finally use SetSecurityInfo to apply the updated DACL.
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
}
return nil
@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
}
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
}
return fd, nil
}
@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVmGroup)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
}
inheritance := inheritModeNoInheritance
@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
modifiedDACL := uintptr(0)
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
}
return modifiedDACL, nil
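The changes in this file swap `github.com/pkg/errors.Wrapf` for `fmt.Errorf` with the `%w` verb. A small sketch of why that is equivalent for callers; `statWithContext` is a hypothetical helper, not part of go-winio:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// statWithContext wraps the underlying error with %w, in the style this diff
// switches to, so callers can still inspect the root cause.
func statWithContext(name string) (os.FileInfo, error) {
	s, err := os.Stat(name)
	if err != nil {
		return nil, fmt.Errorf("GrantVmGroupAccess: os.Stat %s: %w", name, err)
	}
	return s, nil
}

func main() {
	_, err := statWithContext("/definitely/not/there")
	// errors.Is walks the %w chain, so the wrapped fs.ErrNotExist is still visible.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```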

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package vhd
@ -7,14 +8,13 @@ import (
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}
// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
getInfoOnly int32
readOnly int32
resiliencyGUID guid.GUID
}
type openVirtualDiskParameters struct {
version uint32
version2 openVersion2
}
type AttachVersion2 struct {
RestrictedOffset uint64
RestrictedLength uint64
}
type AttachVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version uint32
Version2 AttachVersion2
}
@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
return err
}
if err := syscall.CloseHandle(handle); err != nil {
return err
}
return nil
return syscall.CloseHandle(handle)
}
// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
if err := detachVirtualDisk(handle, 0, 0); err != nil {
return errors.Wrap(err, "failed to detach virtual disk")
return fmt.Errorf("failed to detach virtual disk: %w", err)
}
return nil
}
@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
parameters,
nil,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
AttachVirtualDiskFlagNone,
&params,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
var (
handle syscall.Handle
defaultType VirtualStorageType
getInfoOnly int32
readOnly int32
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
if parameters.Version2.GetInfoOnly {
getInfoOnly = 1
}
if parameters.Version2.ReadOnly {
readOnly = 1
}
params := &openVirtualDiskParameters{
version: parameters.Version,
version2: openVersion2{
getInfoOnly,
readOnly,
parameters.Version2.ResiliencyGUID,
},
}
if err := openVirtualDisk(
&defaultType,
vhdPath,
uint32(virtualDiskAccessMask),
uint32(openVirtualDiskFlags),
parameters,
params,
&handle,
); err != nil {
return 0, errors.Wrap(err, "failed to open virtual disk")
return 0, fmt.Errorf("failed to open virtual disk: %w", err)
}
return handle, nil
}
@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
nil,
&handle,
); err != nil {
return handle, errors.Wrap(err, "failed to create virtual disk")
return handle, fmt.Errorf("failed to create virtual disk: %w", err)
}
return handle, nil
}
@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
&diskPathSizeInBytes,
&diskPhysicalPathBuf[0],
); err != nil {
return "", errors.Wrap(err, "failed to get disk physical path")
return "", fmt.Errorf("failed to get disk physical path: %w", err)
}
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
createParams,
)
if err != nil {
return fmt.Errorf("failed to create differencing vhd: %s", err)
return fmt.Errorf("failed to create differencing vhd: %w", err)
}
if err := syscall.CloseHandle(vhdHandle); err != nil {
return fmt.Errorf("failed to close differencing vhd handle: %s", err)
return fmt.Errorf("failed to close differencing vhd handle: %w", err)
}
return nil
}
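The `openVersion2` comment earlier in this file explains the bool-to-`BOOL` translation that `OpenVirtualDiskWithParameters` now performs inline. A tiny sketch of that conversion, with `boolToWin32Bool` as a hypothetical helper name (the vendored code writes it out by hand):

```go
package main

import "fmt"

// boolToWin32Bool converts a Go bool to the int32 representation Windows
// expects for BOOL fields, the same translation applied to GetInfoOnly and
// ReadOnly above.
func boolToWin32Bool(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(boolToWin32Bool(true), boolToWin32Bool(false)) // 1 0
}
```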

View File

@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint
return
}
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(path)
if win32err != nil {
@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r0 != 0 {
win32err = syscall.Errno(r0)

View File

@ -1,17 +1,17 @@
/*
Copyright 2019 The containerd Authors.
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: api.proto

View File

@ -1,6 +1,6 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@ -192,7 +192,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -0,0 +1,122 @@
package annotations
const (
// Annotations carries the received Kubelet annotations.
Annotations = "io.kubernetes.cri-o.Annotations"
// ContainerID is the container ID annotation.
ContainerID = "io.kubernetes.cri-o.ContainerID"
// ContainerName is the container name annotation.
ContainerName = "io.kubernetes.cri-o.ContainerName"
// ContainerType is the container type (sandbox or container) annotation.
ContainerType = "io.kubernetes.cri-o.ContainerType"
// Created is the container creation time annotation.
Created = "io.kubernetes.cri-o.Created"
// HostName is the container host name annotation.
HostName = "io.kubernetes.cri-o.HostName"
// CgroupParent is the sandbox cgroup parent.
CgroupParent = "io.kubernetes.cri-o.CgroupParent"
// IP is the container ipv4 or ipv6 address.
IP = "io.kubernetes.cri-o.IP"
// NamespaceOptions store the options for namespaces.
NamespaceOptions = "io.kubernetes.cri-o.NamespaceOptions"
// SeccompProfilePath is the node seccomp profile path.
SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath"
// Image is the container image ID annotation.
Image = "io.kubernetes.cri-o.Image"
// ImageName is the container image name annotation.
ImageName = "io.kubernetes.cri-o.ImageName"
// ImageRef is the container image ref annotation.
ImageRef = "io.kubernetes.cri-o.ImageRef"
// KubeName is the kubernetes name annotation.
KubeName = "io.kubernetes.cri-o.KubeName"
// PortMappings holds the port mappings for the sandbox.
PortMappings = "io.kubernetes.cri-o.PortMappings"
// Labels are the kubernetes labels annotation.
Labels = "io.kubernetes.cri-o.Labels"
// LogPath is the container logging path annotation.
LogPath = "io.kubernetes.cri-o.LogPath"
// Metadata is the container metadata annotation.
Metadata = "io.kubernetes.cri-o.Metadata"
// Name is the pod name annotation.
Name = "io.kubernetes.cri-o.Name"
// Namespace is the pod namespace annotation.
Namespace = "io.kubernetes.cri-o.Namespace"
// PrivilegedRuntime is the annotation for the privileged runtime path.
PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime"
// ResolvPath is the resolver configuration path annotation.
ResolvPath = "io.kubernetes.cri-o.ResolvPath"
// HostnamePath is the path to /etc/hostname to bind mount annotation.
HostnamePath = "io.kubernetes.cri-o.HostnamePath"
// SandboxID is the sandbox ID annotation.
SandboxID = "io.kubernetes.cri-o.SandboxID"
// SandboxName is the sandbox name annotation.
SandboxName = "io.kubernetes.cri-o.SandboxName"
// ShmPath is the shared memory path annotation.
ShmPath = "io.kubernetes.cri-o.ShmPath"
// MountPoint is the mount point of the container rootfs.
MountPoint = "io.kubernetes.cri-o.MountPoint"
// RuntimeHandler is the annotation for runtime handler.
RuntimeHandler = "io.kubernetes.cri-o.RuntimeHandler"
// TTY is the terminal path annotation.
TTY = "io.kubernetes.cri-o.TTY"
// Stdin is the stdin annotation.
Stdin = "io.kubernetes.cri-o.Stdin"
// StdinOnce is the stdin_once annotation.
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
// Volumes is the volumes annotation.
Volumes = "io.kubernetes.cri-o.Volumes"
// HostNetwork indicates whether the host network namespace is used or not.
HostNetwork = "io.kubernetes.cri-o.HostNetwork"
// CNIResult is the JSON string representation of the Result from CNI.
CNIResult = "io.kubernetes.cri-o.CNIResult"
// ContainerManager is the annotation key for indicating the creator and
// manager of the container.
ContainerManager = "io.container.manager"
)
// ContainerType values
const (
// ContainerTypeSandbox represents a pod sandbox container.
ContainerTypeSandbox = "sandbox"
// ContainerTypeContainer represents a container running within a pod.
ContainerTypeContainer = "container"
)
// ContainerManagerLibpod indicates that libpod created and manages the
// container.
const ContainerManagerLibpod = "libpod"
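A minimal sketch of how a runtime might consume these keys when it receives an OCI spec. The constants are copied locally so the snippet stays self-contained, and `isSandbox` is a hypothetical helper, not an API of the annotations package:

```go
package main

import "fmt"

// Constants mirroring the annotations package above; a runtime can use them
// to tell a pod sandbox apart from a regular container.
const (
	containerType          = "io.kubernetes.cri-o.ContainerType"
	containerTypeSandbox   = "sandbox"
	containerTypeContainer = "container"
)

// isSandbox reports whether the annotations mark this as the pod sandbox.
func isSandbox(annotations map[string]string) bool {
	return annotations[containerType] == containerTypeSandbox
}

func main() {
	a := map[string]string{containerType: containerTypeSandbox}
	fmt.Println(isSandbox(a)) // true
	a[containerType] = containerTypeContainer
	fmt.Println(isSandbox(a)) // false
}
```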

View File

@ -15,7 +15,7 @@ type roffRenderer struct {
extensions blackfriday.Extensions
listCounters []int
firstHeader bool
defineTerm bool
firstDD bool
listDepth int
}
@ -42,7 +42,8 @@ const (
quoteCloseTag = "\n.RE\n"
listTag = "\n.RS\n"
listCloseTag = "\n.RE\n"
arglistTag = "\n.TP\n"
dtTag = "\n.TP\n"
dd2Tag = "\n"
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
switch node.Type {
case blackfriday.Text:
r.handleText(w, node, entering)
escapeSpecialChars(w, node.Literal)
case blackfriday.Softbreak:
out(w, crTag)
case blackfriday.Hardbreak:
@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
out(w, codeCloseTag)
case blackfriday.Table:
r.handleTable(w, node, entering)
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.TableHead:
case blackfriday.TableBody:
case blackfriday.TableRow:
// no action as cell entries do all the nroff formatting
return blackfriday.GoToNext
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.HTMLSpan:
// ignore other HTML tags
default:
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
}
return walkAction
}
func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
var (
start, end string
)
// handle special roff table cell text encapsulation
if node.Parent.Type == blackfriday.TableCell {
if len(node.Literal) > 30 {
start = tableCellStart
end = tableCellEnd
} else {
// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
if node.Parent.Next == nil && !node.Parent.IsHeader {
end = crTag
}
}
}
out(w, start)
escapeSpecialChars(w, node.Literal)
out(w, end)
}
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
switch node.Level {
@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
r.listCounters[len(r.listCounters)-1]++
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
// DT (definition term): line just before DD (see below).
out(w, dtTag)
r.firstDD = true
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// state machine for handling terms and following definitions
// since blackfriday does not distinguish them properly, nor
// does it seperate them into separate lists as it should
if !r.defineTerm {
out(w, arglistTag)
r.defineTerm = true
// DD (definition description): line that starts with ": ".
//
// We have to distinguish between the first DD and the
// subsequent ones, as there should be no vertical
// whitespace between the DT and the first DD.
if r.firstDD {
r.firstDD = false
} else {
r.defineTerm = false
out(w, dd2Tag)
}
} else {
out(w, ".IP \\(bu 2\n")
@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
out(w, tableStart)
//call walker to count cells (and rows?) so format section can be produced
// call walker to count cells (and rows?) so format section can be produced
columns := countColumns(node)
out(w, strings.Repeat("l ", columns)+"\n")
out(w, strings.Repeat("l ", columns)+".\n")
@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering
}
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
var (
start, end string
)
if node.IsHeader {
start = codespanTag
end = codespanCloseTag
}
if entering {
var start string
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
out(w, "\t"+start)
} else {
out(w, start)
start = "\t"
}
if node.IsHeader {
start += codespanTag
} else if nodeLiteralSize(node) > 30 {
start += tableCellStart
}
out(w, start)
} else {
// need to carriage return if we are at the end of the header row
if node.IsHeader && node.Next == nil {
end = end + crTag
var end string
if node.IsHeader {
end = codespanCloseTag
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
end += crTag
}
out(w, end)
}
}
func nodeLiteralSize(node *blackfriday.Node) int {
total := 0
for n := node.FirstChild; n != nil; n = n.FirstChild {
total += len(n.Literal)
}
return total
}
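Stepping back from the renderer internals, the DT/DD handling described above can be seen end to end by pushing a small markdown definition list through go-md2man. This assumes the `github.com/cpuguy83/go-md2man/v2/md2man` module path and its `Render` entry point:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// The bold term becomes the DT line (.TP), the ": " line becomes the DD body.
	src := []byte("**--debug**\n: enable debug output\n")
	fmt.Println(string(md2man.Render(src)))
}
```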
// because roff format requires knowing the column count before outputting any table
// data we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int {
@ -309,15 +309,6 @@ func out(w io.Writer, output string) {
io.WriteString(w, output) // nolint: errcheck
}
func needsBackslash(c byte) bool {
for _, r := range []byte("-_&\\~") {
if c == r {
return true
}
}
return false
}
func escapeSpecialChars(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) {
// directly copy normal characters
org := i
for i < len(text) && !needsBackslash(text[i]) {
for i < len(text) && text[i] != '\\' {
i++
}
if i > org {

View File

@ -1,93 +0,0 @@
package annotations
const (
// Annotations carries the received Kubelet annotations
Annotations = "io.kubernetes.cri-o.Annotations"
// ContainerID is the container ID annotation
ContainerID = "io.kubernetes.cri-o.ContainerID"
// ContainerName is the container name annotation
ContainerName = "io.kubernetes.cri-o.ContainerName"
// ContainerType is the container type (sandbox or container) annotation
ContainerType = "io.kubernetes.cri-o.ContainerType"
// Created is the container creation time annotation
Created = "io.kubernetes.cri-o.Created"
// HostName is the container host name annotation
HostName = "io.kubernetes.cri-o.HostName"
// IP is the container ipv4 or ipv6 address
IP = "io.kubernetes.cri-o.IP"
// Image is the container image ID annotation
Image = "io.kubernetes.cri-o.Image"
// ImageName is the container image name annotation
ImageName = "io.kubernetes.cri-o.ImageName"
// ImageRef is the container image ref annotation
ImageRef = "io.kubernetes.cri-o.ImageRef"
// KubeName is the kubernetes name annotation
KubeName = "io.kubernetes.cri-o.KubeName"
// Labels are the kubernetes labels annotation
Labels = "io.kubernetes.cri-o.Labels"
// LogPath is the container logging path annotation
LogPath = "io.kubernetes.cri-o.LogPath"
// Metadata is the container metadata annotation
Metadata = "io.kubernetes.cri-o.Metadata"
// Name is the pod name annotation
Name = "io.kubernetes.cri-o.Name"
// PrivilegedRuntime is the annotation for the privileged runtime path
PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime"
// ResolvPath is the resolver configuration path annotation
ResolvPath = "io.kubernetes.cri-o.ResolvPath"
// HostnamePath is the path to /etc/hostname to bind mount annotation
HostnamePath = "io.kubernetes.cri-o.HostnamePath"
// SandboxID is the sandbox ID annotation
SandboxID = "io.kubernetes.cri-o.SandboxID"
// SandboxName is the sandbox name annotation
SandboxName = "io.kubernetes.cri-o.SandboxName"
// ShmPath is the shared memory path annotation
ShmPath = "io.kubernetes.cri-o.ShmPath"
// MountPoint is the mount point of the container rootfs
MountPoint = "io.kubernetes.cri-o.MountPoint"
// TrustedSandbox is the annotation for trusted sandboxes
TrustedSandbox = "io.kubernetes.cri-o.TrustedSandbox"
// TTY is the terminal path annotation
TTY = "io.kubernetes.cri-o.TTY"
// Stdin is the stdin annotation
Stdin = "io.kubernetes.cri-o.Stdin"
// StdinOnce is the stdin_once annotation
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
// Volumes is the volumes annotatoin
Volumes = "io.kubernetes.cri-o.Volumes"
)
// ContainerType values
const (
// ContainerTypeSandbox represents a pod sandbox container
ContainerTypeSandbox = "sandbox"
// ContainerTypeContainer represents a container running within a pod
ContainerTypeContainer = "container"
)

View File

@ -4,10 +4,12 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.13.x
- 1.16.x
- tip
arch:
- AMD64
- ppc64le
os:
- linux
- osx

View File

@ -7,6 +7,19 @@ standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
> **NOTE**: This code is *only* safe if you are not at risk of other processes
> modifying path components after you've used `SecureJoin`. If it is possible
> for a malicious process to modify path components of the resolved path, then
> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
> are some Linux kernel patches I'm working on which might allow for a better
> solution.][lwn-obeneath]
>
> In addition, with a slightly modified API it might be possible to use
> `O_PATH` and verify that the opened path is actually the resolved one -- but
> I have not done that yet. I might add it in the future as a helper function
> to help users verify the path (we can't just return `/proc/self/fd/<foo>`
> because that doesn't always work transparently for all users).
This is the function prototype:
```go
@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error)
This library **guarantees** the following:
* If no error is set, the resulting string **must** be a child path of
`SecureJoin` and will not contain any symlink path components (they will all
be expanded).
`root` and will not contain any symlink path components (they will all be
expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
@ -25,7 +38,7 @@ This library **guarantees** the following:
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
* Non-existant path components are unaffected by `SecureJoin` (similar to
* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) {
}
```
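A small usage sketch to go with the prototype above; the root and path are illustrative, and the printed result depends on what actually exists under the root:

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// The traversal attempt below is resolved inside root rather than escaping it.
	p, err := securejoin.SecureJoin("/srv/chroot", "../../etc/passwd")
	if err != nil {
		fmt.Println("join failed:", err)
		return
	}
	fmt.Println(p) // something under /srv/chroot, e.g. /srv/chroot/etc/passwd
}
```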
[lwn-obeneath]: https://lwn.net/Articles/767547/
[go#20126]: https://github.com/golang/go/issues/20126
### License ###

View File

@ -1 +1 @@
0.2.2
0.2.3

View File

@ -0,0 +1,3 @@
module github.com/cyphar/filepath-securejoin
go 1.13

View File

@ -12,39 +12,20 @@ package securejoin
import (
"bytes"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/errors"
)
// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
// evaluated in attempting to securely join the two given paths.
var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
// effectively a more broad version of os.IsNotExist.
func IsNotExist(err error) bool {
// If it's a bone-fide ENOENT just bail.
if os.IsNotExist(errors.Cause(err)) {
return true
}
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
var errno error
switch err := errors.Cause(err).(type) {
case *os.PathError:
errno = err.Err
case *os.LinkError:
errno = err.Err
case *os.SyscallError:
errno = err.Err
}
return errno == syscall.ENOTDIR || errno == syscall.ENOENT
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
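The rewrite above collapses the old `errors.Cause` type switch into a single `errors.Is` chain. A self-contained sketch of the same check (`isNotExist` is a local copy, not an import of the package):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// isNotExist mirrors the rewritten helper: with %w-wrapped errors, one
// errors.Is chain covers ENOENT, ENOTDIR and os.ErrNotExist.
func isNotExist(err error) bool {
	return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}

func main() {
	_, err := os.Open("/no/such/file")
	fmt.Println(isNotExist(err)) // true
}
```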
// SecureJoinVFS joins the two given path components (similar to Join) except
@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
n := 0
for unsafePath != "" {
if n > 255 {
return "", ErrSymlinkLoop
return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP}
}
// Next path component, p.

View File

@ -1 +0,0 @@
github.com/pkg/errors v0.8.0

View File

@ -0,0 +1,2 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>

View File

@ -1,36 +0,0 @@
sudo: false
language: go
go:
- "stable"
- "1.11.x"
- "1.10.x"
- "1.9.x"
matrix:
include:
- go: "stable"
env: GOLINT=true
allow_failures:
- go: tip
fast_finish: true
before_install:
- if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
script:
- go test --race ./...
after_script:
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
- go vet ./...
os:
- linux
- osx
- windows
notifications:
email: false

View File

@ -4,35 +4,44 @@
# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
# Please keep the list sorted.
Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Alexey Kazakov <alkazako@redhat.com>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Chris Howey <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Eric Lin <linxiulei@gmail.com>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Gautam Dey <gautam.dey77@gmail.com>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Ichinose Shogo <shogo82148@gmail.com>
Johannes Ebke <johannes@ebke.org>
John C Barstow <jbowtie@amathaine.com>
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Matthias Stone <matthias@bellstone.ca>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Oliver Bristow <evilumbrella+github@gmail.com>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pratik Shinde <pratikshinde320@gmail.com>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
@ -41,6 +50,7 @@ Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>

View File

@ -1,6 +1,46 @@
# Changelog
## v1.4.7 / 2018-01-09
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.5.4] - 2022-04-25
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
## [1.5.3] - 2022-04-22
* This version is retracted; an incorrect branch was published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
## [1.5.2] - 2022-04-21
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
## [1.5.1] - 2021-08-24
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
## [1.5.0] - 2021-08-20
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
[#378](https://github.com/fsnotify/fsnotify/pull/378)
[#381](https://github.com/fsnotify/fsnotify/pull/381)
[#385](https://github.com/fsnotify/fsnotify/pull/385)
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
## [1.4.7] - 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
@ -10,62 +50,62 @@
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## v1.4.2 / 2016-10-10
## [1.4.2] - 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## v1.4.1 / 2016-10-04
## [1.4.1] - 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## v1.4.0 / 2016-10-01
## [1.4.0] - 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## v1.3.1 / 2016-06-28
## [1.3.1] - 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## v1.3.0 / 2016-04-19
## [1.3.0] - 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## v1.2.10 / 2016-03-02
## [1.2.10] - 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## v1.2.9 / 2016-01-13
## [1.2.9] - 2016-01-13
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## v1.2.8 / 2015-12-17
## [1.2.8] - 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## v1.2.5 / 2015-10-17
## [1.2.5] - 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## v1.2.1 / 2015-10-14
## [1.2.1] - 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## v1.2.0 / 2015-02-08
## [1.2.0] - 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## v1.1.1 / 2015-02-05
## [1.1.1] - 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## v1.1.0 / 2014-12-12
## [1.1.0] - 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
@ -77,22 +117,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v1.0.4 / 2014-09-07
## [1.0.4] - 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## v1.0.3 / 2014-08-19
## [1.0.3] - 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## v1.0.2 / 2014-08-17
## [1.0.2] - 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## v1.0.0 / 2014-08-15
## [1.0.0] - 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
@ -146,51 +186,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## v0.9.3 / 2014-12-31
## [0.9.3] - 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v0.9.2 / 2014-08-17
## [0.9.2] - 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## v0.9.1 / 2014-06-12
## [0.9.1] - 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## v0.9.0 / 2014-01-17
## [0.9.0] - 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## v0.8.12 / 2013-11-13
## [0.8.12] - 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## v0.8.11 / 2013-11-02
## [0.8.11] - 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
## [0.8.10] - 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## v0.8.9 / 2013-09-08
## [0.8.9] - 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## v0.8.8 / 2013-06-17
## [0.8.8] - 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## v0.8.7 / 2013-06-03
## [0.8.7] - 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
@ -198,74 +238,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] tests on Windows
* lower case error messages
## v0.8.6 / 2013-05-23
## [0.8.6] - 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## v0.8.5 / 2013-05-09
## [0.8.5] - 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## v0.8.4 / 2013-04-07
## [0.8.4] - 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## v0.8.3 / 2013-03-13
## [0.8.3] - 2013-03-13
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## v0.8.2 / 2013-02-07
## [0.8.2] - 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## v0.8.1 / 2013-01-09
## [0.8.1] - 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## v0.8.0 / 2012-11-09
## [0.8.0] - 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## v0.7.4 / 2012-10-09
## [0.7.4] - 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## v0.7.3 / 2012-09-27
## [0.7.3] - 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## v0.7.2 / 2012-09-01
## [0.7.2] - 2012-09-01
* kqueue: events for created directories
## v0.7.1 / 2012-07-14
## [0.7.1] - 2012-07-14
* [Fix] for renaming files
## v0.7.0 / 2012-07-02
## [0.7.0] - 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## v0.6.0 / 2012-06-06
## [0.6.0] - 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## v0.5.1 / 2012-05-22
## [0.5.1] - 2012-05-22
* [Fix] inotify: remove all watches before Close()
## v0.5.0 / 2012-05-03
## [0.5.0] - 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
@ -273,22 +313,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## v0.4.0 / 2012-03-30
## [0.4.0] - 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## v0.3.0 / 2012-02-19
## [0.3.0] - 2012-02-19
* kqueue: add files when watch directory
## v0.2.0 / 2011-12-30
## [0.2.0] - 2011-12-30
* update to latest Go weekly code
## v0.1.0 / 2011-10-19
## [0.1.0] - 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event

View File

@ -48,18 +48,6 @@ fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Win
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer:
@ -67,11 +55,6 @@ Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/).
[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

View File

@ -1,37 +1,31 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
Cross platform: Windows, Linux, BSD and macOS.
| Adapter | OS | Status |
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
| inotify | Linux 2.6.27 or later, Android\* | Supported |
| kqueue | BSD, macOS, iOS\* | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
## Usage
@ -84,10 +78,6 @@ func main() {
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
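Since the `## Example` pointer to `example_test.go` is dropped here, a minimal watcher loop along the lines of the upstream example (the watched path `/tmp` is arbitrary):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println("event:", event)
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified file:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```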
## FAQ
**When a file is moved to another directory is it still being watched?**

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build solaris
// +build solaris
package fsnotify

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !plan9
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.

View File

@ -0,0 +1,36 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
package fsnotify
import (
"fmt"
"runtime"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct{}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

View File

@ -1,5 +1,10 @@
module github.com/fsnotify/fsnotify
go 1.13
go 1.16
require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
require golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
retract (
v1.5.3 // Published an incorrect branch accidentally https://github.com/fsnotify/fsnotify/issues/445
v1.5.0 // Contains symlink regression https://github.com/fsnotify/fsnotify/pull/394
)

View File

@ -1,2 +1,2 @@
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package fsnotify
@ -162,6 +163,19 @@ func (w *Watcher) Remove(name string) error {
return nil
}
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
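A short sketch of the new `WatchList` accessor from the caller's side; the watched path is arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	// WatchList reports every directory and file currently being watched.
	fmt.Println(w.WatchList()) // [/tmp]
}
```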
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
@ -272,7 +286,7 @@ func (w *Watcher) readEvents() {
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package fsnotify
@ -37,7 +38,6 @@ func newFdPoller(fd int) (*fdPoller, error) {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
@ -147,6 +148,19 @@ func (w *Watcher) Remove(name string) error {
return nil
}
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly
package fsnotify

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin
// +build darwin
package fsnotify

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
// +build windows
package fsnotify
@ -11,6 +12,7 @@ import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"sync"
"syscall"
@ -95,6 +97,21 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
entries = append(entries, watchEntry.path)
}
}
return entries
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
@ -451,8 +468,16 @@ func (w *Watcher) readEvents() {
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
// TODO: Consider using unsafe.Slice that is available from go1.17
// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
size := int(raw.FileNameLength / 2)
var buf []uint16
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
sh.Len = size
sh.Cap = size
name := syscall.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
var mask uint64
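The TODO above points at `unsafe.Slice` (Go 1.17+) as the eventual replacement for the `reflect.SliceHeader` dance. A standalone sketch of that alternative, using a local buffer in place of `raw.FileName`:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Stand-in for raw.FileName: a UTF-16 buffer with a known element count.
	data := [5]uint16{'h', 'e', 'l', 'l', 'o'}
	size := len(data)

	// From Go 1.17, unsafe.Slice builds the slice directly from the pointer,
	// which is what the TODO suggests instead of populating a SliceHeader.
	buf := unsafe.Slice((*uint16)(unsafe.Pointer(&data[0])), size)
	fmt.Println(len(buf), cap(buf)) // 5 5
}
```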

View File

@ -176,9 +176,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
} else {
conn.uuid = string(s[1])
return nil, true
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForData:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
@ -191,9 +192,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
} else {
conn.uuid = string(s[1])
return nil, true
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForOk && string(s[0]) == "DATA":
err = authWriteLine(conn.transport, []byte("DATA"))
if err != nil {

View File

@ -169,7 +169,7 @@ func Connect(address string, opts ...ConnOption) (*Conn, error) {
// SystemBusPrivate returns a new private connection to the system bus.
// Note: this connection is not ready to use. One must perform Auth and Hello
// on the connection before it is useable.
// on the connection before it is usable.
func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
return Dial(getSystemBusPlatformAddress(), opts...)
}
@ -284,10 +284,6 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.ctx = context.Background()
}
conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx)
go func() {
<-conn.ctx.Done()
conn.Close()
}()
conn.calls = newCallTracker()
if conn.handler == nil {
@ -302,6 +298,11 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.outHandler = &outputHandler{conn: conn}
conn.names = newNameTracker()
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
go func() {
<-conn.ctx.Done()
conn.Close()
}()
return conn, nil
}
@ -550,6 +551,11 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call {
call.ctx = ctx
call.ctxCanceler = canceler
conn.calls.track(msg.serial, call)
if ctx.Err() != nil {
// short path: don't even send the message if context already cancelled
conn.calls.handleSendError(msg, ctx.Err())
return call
}
go func() {
<-ctx.Done()
conn.calls.handleSendError(msg, ctx.Err())
@ -649,7 +655,9 @@ func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...Match
// Signal registers the given channel to be passed all received signal messages.
//
// Multiple of these channels can be registered at the same time.
// Multiple of these channels can be registered at the same time. The channel is
// closed if the Conn is closed; it should not be closed by the caller before
// RemoveSignal was called on it.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
@ -765,7 +773,12 @@ func getKey(s, key string) string {
for _, keyEqualsValue := range strings.Split(s, ",") {
keyValue := strings.SplitN(keyEqualsValue, "=", 2)
if len(keyValue) == 2 && keyValue[0] == key {
return keyValue[1]
val, err := UnescapeBusAddressValue(keyValue[1])
if err != nil {
// No way to return an error.
return ""
}
return val
}
}
return ""

View File

@ -54,7 +54,7 @@ func tryDiscoverDbusSessionBusAddress() string {
if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
// if /run/user/<uid>/bus exists, that file itself
// *is* the unix socket, so return its path
return fmt.Sprintf("unix:path=%s", runUserBusFile)
return fmt.Sprintf("unix:path=%s", EscapeBusAddressValue(runUserBusFile))
}
if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
// if /run/user/<uid>/dbus-session exists, it's a
@ -85,9 +85,6 @@ func getRuntimeDirectory() (string, error) {
}
func fileExists(filename string) bool {
if _, err := os.Stat(filename); !os.IsNotExist(err) {
return true
} else {
return false
}
_, err := os.Stat(filename)
return !os.IsNotExist(err)
}

View File

@ -122,8 +122,11 @@ func isConvertibleTo(dest, src reflect.Type) bool {
case dest.Kind() == reflect.Slice:
return src.Kind() == reflect.Slice &&
isConvertibleTo(dest.Elem(), src.Elem())
case dest.Kind() == reflect.Ptr:
dest = dest.Elem()
return isConvertibleTo(dest, src)
case dest.Kind() == reflect.Struct:
return src == interfacesType
return src == interfacesType || dest.Kind() == src.Kind()
default:
return src.ConvertibleTo(dest)
}
@ -274,13 +277,8 @@ func storeSliceIntoInterface(dest, src reflect.Value) error {
func storeSliceIntoSlice(dest, src reflect.Value) error {
if dest.IsNil() || dest.Len() < src.Len() {
dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
}
if dest.Len() != src.Len() {
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"slices are different lengths "+
"need: %d have: %d",
src.Len(), dest.Len())
} else if dest.Len() > src.Len() {
dest.Set(dest.Slice(0, src.Len()))
}
for i := 0; i < src.Len(); i++ {
err := store(dest.Index(i), getVariantValue(src.Index(i)))

View File

@ -10,8 +10,10 @@ value.
Conversion Rules
For outgoing messages, Go types are automatically converted to the
corresponding D-Bus types. The following types are directly encoded as their
respective D-Bus equivalents:
corresponding D-Bus types. See the official specification at
https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more
information on the D-Bus type system. The following types are directly encoded
as their respective D-Bus equivalents:
Go type | D-Bus type
------------+-----------
@ -39,8 +41,8 @@ Maps encode as DICTs, provided that their key type can be used as a key for
a DICT.
Structs other than Variant and Signature encode as a STRUCT containing their
exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
be skipped.
exported fields in order. Fields whose tags contain `dbus:"-"` and unexported
fields will be skipped.
Pointers encode as the value they're pointed to.

src/runtime/vendor/github.com/godbus/dbus/v5/escape.go (84 additions, generated, vendored, new file)
View File

@ -0,0 +1,84 @@
package dbus
import "net/url"
// EscapeBusAddressValue implements a requirement to escape the values
// in D-Bus server addresses, as defined by the D-Bus specification at
// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
func EscapeBusAddressValue(val string) string {
toEsc := strNeedsEscape(val)
if toEsc == 0 {
// Avoid unneeded allocation/copying.
return val
}
// Avoid allocation for short paths.
var buf [64]byte
var out []byte
// Every to-be-escaped byte needs 2 extra bytes.
required := len(val) + 2*toEsc
if required <= len(buf) {
out = buf[:required]
} else {
out = make([]byte, required)
}
j := 0
for i := 0; i < len(val); i++ {
if ch := val[i]; needsEscape(ch) {
// Convert ch to %xx, where xx is hex value.
out[j] = '%'
out[j+1] = hexchar(ch >> 4)
out[j+2] = hexchar(ch & 0x0F)
j += 3
} else {
out[j] = ch
j++
}
}
return string(out)
}
// UnescapeBusAddressValue unescapes values in D-Bus server addresses,
// as defined by the D-Bus specification at
// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
func UnescapeBusAddressValue(val string) (string, error) {
// Looks like url.PathUnescape does exactly what is required.
return url.PathUnescape(val)
}
// hexchar returns a hexadecimal representation of n, where n < 16.
// For invalid values of n, the function panics.
func hexchar(n byte) byte {
const hex = "0123456789abcdef"
// For n >= len(hex), runtime will panic.
return hex[n]
}
// needsEscape tells if a byte is NOT one of optionally-escaped bytes.
func needsEscape(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '/', '\\', '.', '*':
return false
}
return true
}
// strNeedsEscape tells how many bytes in the string need escaping.
func strNeedsEscape(val string) int {
count := 0
for i := 0; i < len(val); i++ {
if needsEscape(val[i]) {
count++
}
}
return count
}
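
The escape helpers above are exported, so code that builds D-Bus server addresses by hand can reuse them. A minimal round-trip sketch; the socket path is purely an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	// Hypothetical socket path containing characters that must be escaped
	// when placed in a "unix:path=..." D-Bus server address.
	raw := "/run/user/1000/bus dir;1"

	esc := dbus.EscapeBusAddressValue(raw)
	fmt.Println("unix:path=" + esc)

	back, err := dbus.UnescapeBusAddressValue(esc)
	if err != nil {
		fmt.Println("unescape failed:", err)
		return
	}
	fmt.Println(back == raw) // true
}
```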

View File

@ -3,6 +3,7 @@ package dbus
import (
"errors"
"fmt"
"os"
"reflect"
"strings"
)
@ -209,28 +210,23 @@ func (conn *Conn) handleCall(msg *Message) {
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
conn.sendMessageAndIfClosed(reply, nil)
if err := reply.IsValid(); err != nil {
fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err)
} else {
conn.sendMessageAndIfClosed(reply, nil)
}
}
}
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
if !path.IsValid() {
return errors.New("dbus: invalid object path")
}
i := strings.LastIndex(name, ".")
if i == -1 {
return errors.New("dbus: invalid method name")
}
iface := name[:i]
member := name[i+1:]
if !isValidMember(member) {
return errors.New("dbus: invalid method name")
}
if !isValidInterface(iface) {
return errors.New("dbus: invalid interface name")
}
msg := new(Message)
msg.Type = TypeSignal
msg.Headers = make(map[HeaderField]Variant)
@ -241,6 +237,9 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
if err := msg.IsValid(); err != nil {
return err
}
var closed bool
conn.sendMessageAndIfClosed(msg, func() {

View File

@ -2,27 +2,24 @@ package dbus
import (
"os"
"sync"
)
var (
homeDir string
homeDirLock sync.Mutex
"os/user"
)
// Get returns the home directory of the current user, which is usually the
// value of HOME environment variable. In case it is not set or empty, os/user
// package is used.
//
// If linking statically with cgo enabled against glibc, make sure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func getHomeDir() string {
homeDirLock.Lock()
defer homeDirLock.Unlock()
homeDir := os.Getenv("HOME")
if homeDir != "" {
return homeDir
}
homeDir = os.Getenv("HOME")
if homeDir != "" {
return homeDir
if u, err := user.Current(); err == nil {
return u.HomeDir
}
homeDir = lookupHomeDir()
return homeDir
return "/"
}

View File

@ -1,15 +0,0 @@
// +build !static_build
package dbus
import (
"os/user"
)
func lookupHomeDir() string {
u, err := user.Current()
if err != nil {
return "/"
}
return u.HomeDir
}

View File

@ -1,45 +0,0 @@
// +build static_build
package dbus
import (
"bufio"
"os"
"strconv"
"strings"
)
func lookupHomeDir() string {
myUid := os.Getuid()
f, err := os.Open("/etc/passwd")
if err != nil {
return "/"
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
break
}
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
parts := strings.Split(line, ":")
if len(parts) >= 6 {
uid, err := strconv.Atoi(parts[2])
if err == nil && uid == myUid {
return parts[5]
}
}
}
// Default to / if we can't get a better value
return "/"
}

View File

@ -208,7 +208,7 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) {
// The possibly returned error can be an error of the underlying reader, an
// InvalidMessageError or a FormatError.
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
return DecodeMessageWithFDs(rd, make([]int, 0));
return DecodeMessageWithFDs(rd, make([]int, 0))
}
type nullwriter struct{}
@ -227,8 +227,8 @@ func (msg *Message) CountFds() (int, error) {
}
func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) {
if err := msg.IsValid(); err != nil {
return make([]int, 0), err
if err := msg.validateHeader(); err != nil {
return nil, err
}
var vs [7]interface{}
switch order {
@ -237,7 +237,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds
case binary.BigEndian:
vs[0] = byte('B')
default:
return make([]int, 0), errors.New("dbus: invalid byte order")
return nil, errors.New("dbus: invalid byte order")
}
body := new(bytes.Buffer)
fds = make([]int, 0)
@ -284,8 +284,13 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error)
}
// IsValid checks whether msg is a valid message and returns an
// InvalidMessageError if it is not.
// InvalidMessageError or FormatError if it is not.
func (msg *Message) IsValid() error {
var b bytes.Buffer
return msg.EncodeTo(&b, nativeEndian)
}
func (msg *Message) validateHeader() error {
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
return InvalidMessageError("invalid flags")
}
@ -330,6 +335,7 @@ func (msg *Message) IsValid() error {
return InvalidMessageError("missing signature")
}
}
return nil
}

View File

@ -63,7 +63,7 @@ type Method interface {
// any other decoding scheme.
type ArgumentDecoder interface {
// To decode the arguments of a method the sender and message are
// provided incase the semantics of the implementer provides access
// provided in case the semantics of the implementer provides access
// to these as part of the method invocation.
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
}

View File

@ -102,7 +102,7 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) {
}
}
if len(s) == 0 {
panic("empty struct")
panic(InvalidTypeError{t})
}
return "(" + s + ")"
case reflect.Array, reflect.Slice:

View File

@ -154,17 +154,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// substitute the values in the message body (which are indices for the
// array receiver via OOB) with the actual values
for i, v := range msg.Body {
switch v.(type) {
switch index := v.(type) {
case UnixFDIndex:
j := v.(UnixFDIndex)
if uint32(j) >= unixfds {
if uint32(index) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
msg.Body[i] = UnixFD(fds[j])
msg.Body[i] = UnixFD(fds[index])
case []UnixFDIndex:
idxArray := v.([]UnixFDIndex)
fdArray := make([]UnixFD, len(idxArray))
for k, j := range idxArray {
fdArray := make([]UnixFD, len(index))
for k, j := range index {
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}

View File

@ -0,0 +1,6 @@
package dbus
func (t *unixTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}

View File

@ -49,7 +49,7 @@ func ParseVariant(s string, sig Signature) (Variant, error) {
}
// format returns a formatted version of v and whether this string can be parsed
// unambigously.
// unambiguously.
func (v Variant) format() (string, bool) {
switch v.sig.str[0] {
case 'b', 'i':

View File

@ -0,0 +1,524 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONUnmarshalV2 = false
// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
return new(Unmarshaler).UnmarshalNext(d, m)
}
// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
return new(Unmarshaler).Unmarshal(r, m)
}
// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}
// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
// AllowUnknownFields specifies whether to allow messages to contain
// unknown JSON fields, as opposed to failing to unmarshal.
AllowUnknownFields bool
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
UnmarshalJSONPB(*Unmarshaler, []byte) error
}
// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
return u.UnmarshalNext(json.NewDecoder(r), m)
}
// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
if m == nil {
return errors.New("invalid nil message")
}
// Parse the next JSON object from the stream.
raw := json.RawMessage{}
if err := d.Decode(&raw); err != nil {
return err
}
// Check for custom unmarshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsu, ok := m.(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, raw)
}
mr := proto.MessageReflect(m)
// NOTE: For historical reasons, a top-level null is treated as a noop.
// This is incorrect, but kept for compatibility.
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
return nil
}
if wrapJSONUnmarshalV2 {
// NOTE: If input message is non-empty, we need to preserve merge semantics
// of the old jsonpb implementation. These semantics are not supported by
// the protobuf JSON specification.
isEmpty := true
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
isEmpty = false // at least one iteration implies non-empty
return false
})
if !isEmpty {
// Perform unmarshaling into a newly allocated, empty message.
mr = mr.New()
// Use a defer to copy all unmarshaled fields into the original message.
dst := proto.MessageReflect(m)
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
dst.Set(fd, v)
return true
})
}
// Unmarshal using the v2 JSON unmarshaler.
opts := protojson.UnmarshalOptions{
DiscardUnknown: u.AllowUnknownFields,
}
if u.AnyResolver != nil {
opts.Resolver = anyResolver{u.AnyResolver}
}
return opts.Unmarshal(raw, mr.Interface())
} else {
if err := u.unmarshalMessage(mr, raw); err != nil {
return err
}
return protoV2.CheckInitialized(mr.Interface())
}
}
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
md := m.Descriptor()
fds := md.Fields()
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, in)
}
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
return nil
}
switch wellKnownType(md.FullName()) {
case "Any":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
rawTypeURL, ok := jsonObject["@type"]
if !ok {
return errors.New("Any JSON doesn't have '@type'")
}
typeURL, err := unquoteString(string(rawTypeURL))
if err != nil {
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
}
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
var m2 protoreflect.Message
if u.AnyResolver != nil {
mi, err := u.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
if err == protoregistry.NotFound {
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
}
return err
}
m2 = mt.New()
}
if wellKnownType(m2.Descriptor().FullName()) != "" {
rawValue, ok := jsonObject["value"]
if !ok {
return errors.New("Any JSON doesn't have 'value'")
}
if err := u.unmarshalMessage(m2, rawValue); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
} else {
delete(jsonObject, "@type")
rawJSON, err := json.Marshal(jsonObject)
if err != nil {
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
}
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
}
rawWire, err := protoV2.Marshal(m2.Interface())
if err != nil {
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
return nil
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
if err != nil {
return err
}
m.Set(fd, v)
return nil
case "Duration":
v, err := unquoteString(string(in))
if err != nil {
return err
}
d, err := time.ParseDuration(v)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
sec := d.Nanoseconds() / 1e9
nsec := d.Nanoseconds() % 1e9
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Timestamp":
v, err := unquoteString(string(in))
if err != nil {
return err
}
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
sec := t.Unix()
nsec := t.Nanosecond()
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Value":
switch {
case string(in) == "null":
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
case string(in) == "true":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
case string(in) == "false":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
case hasPrefixAndSuffix('"', in, '"'):
s, err := unquoteString(string(in))
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
case hasPrefixAndSuffix('[', in, ']'):
v := m.Mutable(fds.ByNumber(6))
return u.unmarshalMessage(v.Message(), in)
case hasPrefixAndSuffix('{', in, '}'):
v := m.Mutable(fds.ByNumber(5))
return u.unmarshalMessage(v.Message(), in)
default:
f, err := strconv.ParseFloat(string(in), 0)
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
}
return nil
case "ListValue":
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return fmt.Errorf("bad ListValue: %v", err)
}
lv := m.Mutable(fds.ByNumber(1)).List()
for _, raw := range jsonArray {
ve := lv.NewElement()
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
return err
}
lv.Append(ve)
}
return nil
case "Struct":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return fmt.Errorf("bad StructValue: %v", err)
}
mv := m.Mutable(fds.ByNumber(1)).Map()
for key, raw := range jsonObject {
kv := protoreflect.ValueOf(key).MapKey()
vv := mv.NewValue()
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
}
mv.Set(kv, vv)
}
return nil
}
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
// Handle known fields.
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if fd.IsWeak() && fd.Message().IsPlaceholder() {
continue // weak reference is not linked in
}
// Search for any raw JSON value associated with this field.
var raw json.RawMessage
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
name = string(fd.JSONName())
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
// Handle extension fields.
for name, raw := range jsonObject {
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
continue
}
// Resolve the extension field by name.
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(md) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
continue
}
delete(jsonObject, name)
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
if !u.AllowUnknownFields && len(jsonObject) > 0 {
for name := range jsonObject {
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
}
}
return nil
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
}
return false
}
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
return ok
}
return false
}
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch {
case fd.IsList():
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return v, err
}
lv := v.List()
for _, raw := range jsonArray {
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
if err != nil {
return v, err
}
lv.Append(ve)
}
return v, nil
case fd.IsMap():
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return v, err
}
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
for key, raw := range jsonObject {
var kv protoreflect.MapKey
if kfd.Kind() == protoreflect.StringKind {
kv = protoreflect.ValueOf(key).MapKey()
} else {
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
if err != nil {
return v, err
}
kv = v.MapKey()
}
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
if err != nil {
return v, err
}
mv.Set(kv, vv)
}
return v, nil
default:
return u.unmarshalSingularValue(v, in, fd)
}
}
var nonFinite = map[string]float64{
`"NaN"`: math.NaN(),
`"Infinity"`: math.Inf(+1),
`"-Infinity"`: math.Inf(-1),
}
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch fd.Kind() {
case protoreflect.BoolKind:
return unmarshalValue(in, new(bool))
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
return unmarshalValue(trimQuote(in), new(int32))
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return unmarshalValue(trimQuote(in), new(int64))
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
return unmarshalValue(trimQuote(in), new(uint32))
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return unmarshalValue(trimQuote(in), new(uint64))
case protoreflect.FloatKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat32(float32(f)), nil
}
return unmarshalValue(trimQuote(in), new(float32))
case protoreflect.DoubleKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat64(float64(f)), nil
}
return unmarshalValue(trimQuote(in), new(float64))
case protoreflect.StringKind:
return unmarshalValue(in, new(string))
case protoreflect.BytesKind:
return unmarshalValue(in, new([]byte))
case protoreflect.EnumKind:
if hasPrefixAndSuffix('"', in, '"') {
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
if vd == nil {
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
}
return protoreflect.ValueOfEnum(vd.Number()), nil
}
return unmarshalValue(in, new(protoreflect.EnumNumber))
case protoreflect.MessageKind, protoreflect.GroupKind:
err := u.unmarshalMessage(v.Message(), in)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
}
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
err := json.Unmarshal(in, v)
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}
func unquoteString(in string) (out string, err error) {
err = json.Unmarshal([]byte(in), &out)
return out, err
}
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
return true
}
return false
}
// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
in = in[1 : len(in)-1]
}
return in
}

View File

@ -0,0 +1,559 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONMarshalV2 = false
// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
// OrigName specifies whether to use the original protobuf name for fields.
OrigName bool
// EnumsAsInts specifies whether to render enum values as integers,
// as opposed to string values.
EnumsAsInts bool
// EmitDefaults specifies whether to render fields with zero values.
EmitDefaults bool
// Indent controls whether the output is compact or not.
// If empty, the output is compact JSON. Otherwise, every JSON object
// entry and JSON array value will be on its own line.
// Each line will be preceded by repeated copies of Indent, where the
// number of copies is the current indentation depth.
Indent string
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
MarshalJSONPB(*Marshaler) ([]byte, error)
}
// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
b, err := jm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
b, err := jm.marshal(m)
if err != nil {
return "", err
}
return string(b), nil
}
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
v := reflect.ValueOf(m)
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, errors.New("Marshal called with nil")
}
// Check for custom marshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsm, ok := m.(JSONPBMarshaler); ok {
return jsm.MarshalJSONPB(jm)
}
if wrapJSONMarshalV2 {
opts := protojson.MarshalOptions{
UseProtoNames: jm.OrigName,
UseEnumNumbers: jm.EnumsAsInts,
EmitUnpopulated: jm.EmitDefaults,
Indent: jm.Indent,
}
if jm.AnyResolver != nil {
opts.Resolver = anyResolver{jm.AnyResolver}
}
return opts.Marshal(proto.MessageReflect(m).Interface())
} else {
// Check for unpopulated required fields first.
m2 := proto.MessageReflect(m)
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
return nil, err
}
w := jsonWriter{Marshaler: jm}
err := w.marshalMessage(m2, "", "")
return w.buf, err
}
}
type jsonWriter struct {
*Marshaler
buf []byte
}
func (w *jsonWriter) write(s string) {
w.buf = append(w.buf, s...)
}
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
b, err := jsm.MarshalJSONPB(w.Marshaler)
if err != nil {
return err
}
if typeURL != "" {
// we are marshaling this object to an Any type
var js map[string]*json.RawMessage
if err = json.Unmarshal(b, &js); err != nil {
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
}
turl, err := json.Marshal(typeURL)
if err != nil {
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
}
js["@type"] = (*json.RawMessage)(&turl)
if b, err = json.Marshal(js); err != nil {
return err
}
}
w.write(string(b))
return nil
}
md := m.Descriptor()
fds := md.Fields()
// Handle well-known types.
const secondInNanos = int64(time.Second / time.Nanosecond)
switch wellKnownType(md.FullName()) {
case "Any":
return w.marshalAny(m, indent)
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
case "Duration":
const maxSecondsInDuration = 315576000000
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
return fmt.Errorf("seconds out of range %v", s)
}
if ns <= -secondInNanos || ns >= secondInNanos {
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
}
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
return errors.New("signs of seconds and nanos do not match")
}
var sign string
if s < 0 || ns < 0 {
sign, s, ns = "-", -1*s, -1*ns
}
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vs"`, x))
return nil
case "Timestamp":
// "RFC 3339, where generated output will always be Z-normalized
// and uses 0, 3, 6 or 9 fractional digits."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if ns < 0 || ns >= secondInNanos {
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
}
t := time.Unix(s, ns).UTC()
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vZ"`, x))
return nil
case "Value":
// JSON value; which is a null, number, string, bool, object, or array.
od := md.Oneofs().Get(0)
fd := m.WhichOneof(od)
if fd == nil {
return errors.New("nil Value")
}
return w.marshalValue(fd, m.Get(fd), indent)
case "Struct", "ListValue":
// JSON object or array.
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
firstField := true
if typeURL != "" {
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
firstField = false
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
if fd == nil {
continue
}
} else {
i++
}
v := m.Get(fd)
if !m.Has(fd) {
if !w.EmitDefaults || fd.ContainingOneof() != nil {
continue
}
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
}
}
if !firstField {
w.writeComma()
}
if err := w.marshalField(fd, v, indent); err != nil {
return err
}
firstField = false
}
// Handle proto2 extensions.
if md.ExtensionRanges().Len() > 0 {
// Collect a sorted list of all extension descriptor and values.
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
if !firstField {
w.writeComma()
}
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
return err
}
firstField = false
}
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) writeComma() {
if w.Indent != "" {
w.write(",\n")
} else {
w.write(",")
}
}
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
// "If the Any contains a value that has a special JSON mapping,
// it will be converted as follows: {"@type": xxx, "value": yyy}.
// Otherwise, the value will be converted into a JSON object,
// and the "@type" field will be inserted to indicate the actual data type."
md := m.Descriptor()
typeURL := m.Get(md.Fields().ByNumber(1)).String()
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
var m2 protoreflect.Message
if w.AnyResolver != nil {
mi, err := w.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
return err
}
m2 = mt.New()
}
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
return err
}
if wellKnownType(m2.Descriptor().FullName()) == "" {
return w.marshalMessage(m2, indent, typeURL)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
w.writeComma()
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
w.write(`"value": `)
} else {
w.write(`"value":`)
}
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
return err
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"@type":`)
if w.Indent != "" {
w.write(" ")
}
b, err := json.Marshal(typeURL)
if err != nil {
return err
}
w.write(string(b))
return nil
}
// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"`)
switch {
case fd.IsExtension():
// For message set, use the fname of the message as the extension name.
name := string(fd.FullName())
if isMessageSet(fd.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
w.write("[" + name + "]")
case w.OrigName:
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
w.write(name)
default:
w.write(string(fd.JSONName()))
}
w.write(`":`)
if w.Indent != "" {
w.write(" ")
}
return w.marshalValue(fd, v, indent)
}
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case fd.IsList():
w.write("[")
comma := ""
lv := v.List()
for i := 0; i < lv.Len(); i++ {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write("]")
return nil
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
// Collect a sorted list of all map keys and values.
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
w.write(`{`)
comma := ""
for _, entry := range entries {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
s := fmt.Sprint(entry.key.Interface())
b, err := json.Marshal(s)
if err != nil {
return err
}
w.write(string(b))
w.write(`:`)
if w.Indent != "" {
w.write(` `)
}
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write(`}`)
return nil
default:
return w.marshalSingularValue(fd, v, indent)
}
}
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case !v.IsValid():
w.write("null")
return nil
case fd.Message() != nil:
return w.marshalMessage(v.Message(), indent+w.Indent, "")
case fd.Enum() != nil:
if fd.Enum().FullName() == "google.protobuf.NullValue" {
w.write("null")
return nil
}
vd := fd.Enum().Values().ByNumber(v.Enum())
if vd == nil || w.EnumsAsInts {
w.write(strconv.Itoa(int(v.Enum())))
} else {
w.write(`"` + string(vd.Name()) + `"`)
}
return nil
default:
switch v.Interface().(type) {
case float32, float64:
switch {
case math.IsInf(v.Float(), +1):
w.write(`"Infinity"`)
return nil
case math.IsInf(v.Float(), -1):
w.write(`"-Infinity"`)
return nil
case math.IsNaN(v.Float()):
w.write(`"NaN"`)
return nil
}
case int64, uint64:
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
return nil
}
b, err := json.Marshal(v.Interface())
if err != nil {
return err
}
w.write(string(b))
return nil
}
}

View File

@ -0,0 +1,69 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb
import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
Resolve(typeURL string) (proto.Message, error)
}
type anyResolver struct{ AnyResolver }
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
return r.FindMessageByURL(string(message))
}
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
m, err := r.Resolve(url)
if err != nil {
return nil, err
}
return protoimpl.X.MessageTypeOf(m), nil
}
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
func wellKnownType(s protoreflect.FullName) string {
if s.Parent() == "google.protobuf" {
switch s.Name() {
case "Empty", "Any",
"BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue",
"Duration", "Timestamp",
"NullValue", "Struct", "Value", "ListValue":
return string(s.Name())
}
}
return ""
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
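
Although the package is deprecated in favour of protojson, it still exposes the classic Marshaler/Unmarshaler API. A minimal sketch, assuming a generated well-known type from google.golang.org/protobuf is available to act as the message:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Well-known types get their special JSON mapping: a Duration is
	// rendered as a string such as "90s" rather than a {seconds, nanos} object.
	m := &jsonpb.Marshaler{}
	s, err := m.MarshalToString(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(s)

	// Round-trip the JSON back into a fresh message.
	var d durationpb.Duration
	if err := jsonpb.UnmarshalString(s, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration())
}
```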

View File

@ -1,6 +1,29 @@
## unreleased
## 1.5.0
* Fix regression where `*time.Time` value would be set to empty and not be sent
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
without `mapstructure` (or the configured tag name) set [GH-277]
* New option `ErrorUnset` which makes it an error if any fields
in a target struct are not set by the decoding process. [GH-225]
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
* Decoding to slice from array no longer crashes [GH-265]
* Decode nested struct pointers to map [GH-271]
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
* Fix issue where fields with `,omitempty` would sometimes decode
into a map with an empty string key [GH-281]
## 1.4.3
* Fix cases where `json.Number` didn't decode properly [GH-261]
## 1.4.2
* Custom name matchers to support any sort of casing, formatting, etc. for
field names. [GH-250]
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
## 1.4.1
* Fix regression where `*time.Time` value would be set to empty and not be sent
to decode hooks properly [GH-232]
## 1.4.0

View File

@ -62,7 +62,8 @@ func DecodeHookExec(
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
var data interface{}
data := f.Interface()
newFrom := f
for _, f1 := range fs {
data, err = DecodeHookExec(f1, newFrom, t)
@ -76,6 +77,28 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
}
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
return func(a, b reflect.Value) (interface{}, error) {
var allErrs string
var out interface{}
var err error
for _, f := range ff {
out, err = DecodeHookExec(f, a, b)
if err != nil {
allErrs += err.Error() + "\n"
continue
}
return out, nil
}
return nil, errors.New(allErrs)
}
}
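
The new OrComposeDecodeHookFunc complements ComposeDecodeHookFunc: instead of chaining hooks, it tries them in order and keeps the first result that succeeds. A minimal sketch using two of the library's stock hooks; the Config type and input map are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
}

func main() {
	// The first hook that returns no error wins; here the duration hook
	// handles the value, so the IP hook is never consulted.
	hook := mapstructure.OrComposeDecodeHookFunc(
		mapstructure.StringToTimeDurationHookFunc(),
		mapstructure.StringToIPHookFunc(),
	)

	var out Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: hook,
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"timeout": "1500ms"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Timeout) // 1.5s
}
```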
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {

View File

@ -122,7 +122,7 @@
// field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type.
//
// type Source {
// type Source struct {
// Age int `mapstructure:",omitempty"`
// }
//
@ -192,7 +192,7 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target
// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
// values.
type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
@ -215,6 +215,12 @@ type DecoderConfig struct {
// (extra keys).
ErrorUnused bool
// If ErrorUnset is true, then it is an error for there to exist
// fields in the result that were not set in the decoding process
// (extra fields). This only applies to decoding to a struct. This
// will affect all nested structs as well.
ErrorUnset bool
// ZeroFields, if set to true, will zero fields before writing them.
// For example, a map will be emptied before decoded values are put in
// it. If this is false, a map will be merged.
@ -258,6 +264,15 @@ type DecoderConfig struct {
// The tag name that mapstructure reads for field names. This
// defaults to "mapstructure"
TagName string
// IgnoreUntaggedFields ignores all struct fields without explicit
// TagName, comparable to `mapstructure:"-"` as default behaviour.
IgnoreUntaggedFields bool
// MatchName is the function used to match the map key to the struct
// field name or tag. Defaults to `strings.EqualFold`. This can be used
// to implement case-sensitive tag values, support snake casing, etc.
MatchName func(mapKey, fieldName string) bool
}
// A Decoder takes a raw interface value and turns it into structured
@ -279,6 +294,11 @@ type Metadata struct {
// Unused is a slice of keys that were found in the raw value but
// weren't decoded since there was no matching field in the result interface
Unused []string
// Unset is a slice of field names that were found in the result interface
// but weren't set in the decoding process since there was no matching value
// in the input
Unset []string
}
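
Taken together, the new ErrorUnset, IgnoreUntaggedFields, and MatchName options plus Metadata.Unset give callers finer control over strictness and key matching. A minimal sketch of how they might be wired up; the struct, field names, and input values are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Host string `mapstructure:"host"`
	Port int    `mapstructure:"port"`
}

func main() {
	var out Server
	var md mapstructure.Metadata

	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:   &out,
		Metadata: &md,
		// Flip to true to turn "a struct field was never set" into a decode error.
		ErrorUnset: false,
		// Require exact key matches instead of the default strings.EqualFold.
		MatchName: func(mapKey, fieldName string) bool {
			return mapKey == fieldName
		},
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"host": "example.com"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Host)           // example.com
	fmt.Println("unset:", md.Unset) // the port field was never populated
}
```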
// Decode takes an input structure and uses reflection to translate it to
@ -370,12 +390,20 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
if config.Metadata.Unused == nil {
config.Metadata.Unused = make([]string, 0)
}
if config.Metadata.Unset == nil {
config.Metadata.Unset = make([]string, 0)
}
}
if config.TagName == "" {
config.TagName = "mapstructure"
}
if config.MatchName == nil {
config.MatchName = strings.EqualFold
}
result := &Decoder{
config: config,
}
@ -675,16 +703,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := jn.Int64()
i, err := strconv.ParseUint(string(jn), 0, 64)
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
}
if i < 0 && !d.config.WeaklyTypedInput {
return fmt.Errorf("cannot parse '%s', %d overflows uint",
name, i)
}
val.SetUint(uint64(i))
val.SetUint(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
@ -901,9 +925,15 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
tagValue := f.Tag.Get(d.config.TagName)
keyName := f.Name
if tagValue == "" && d.config.IgnoreUntaggedFields {
continue
}
// If Squash is set in the config, we squash the field down.
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
// Determine the name of the key in the map
if index := strings.Index(tagValue, ","); index != -1 {
if tagValue[:index] == "-" {
@ -915,7 +945,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
}
// If "squash" is specified in the tag, we squash the field down.
squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
if squash {
// When squashing, the embedded type can be a pointer to a struct.
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
@ -927,7 +957,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
}
}
keyName = tagValue[:index]
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
keyName = keyNameTagValue
}
} else if len(tagValue) > 0 {
if tagValue == "-" {
continue
@ -1083,7 +1115,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
}
// If the input value is nil, then don't allocate since empty != nil
if dataVal.IsNil() {
if dataValKind != reflect.Array && dataVal.IsNil() {
return nil
}
@ -1245,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
}
targetValKeysUnused := make(map[interface{}]struct{})
errors := make([]string, 0)
// This slice will keep track of all the structs we'll be decoding.
@ -1340,7 +1373,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
continue
}
if strings.EqualFold(mK, fieldName) {
if d.config.MatchName(mK, fieldName) {
rawMapKey = dataValKey
rawMapVal = dataVal.MapIndex(dataValKey)
break
@ -1349,7 +1382,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
if !rawMapVal.IsValid() {
// There was no matching key in the map for the value in
// the struct. Just ignore.
// the struct. Remember it for potential errors and metadata.
targetValKeysUnused[fieldName] = struct{}{}
continue
}
}
@ -1409,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
errors = appendErrors(errors, err)
}
if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
keys := make([]string, 0, len(targetValKeysUnused))
for rawKey := range targetValKeysUnused {
keys = append(keys, rawKey.(string))
}
sort.Strings(keys)
err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
errors = appendErrors(errors, err)
}
if len(errors) > 0 {
return &Error{errors}
}
@ -1423,6 +1468,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
}
for rawKey := range targetValKeysUnused {
key := rawKey.(string)
if name != "" {
key = name + "." + key
}
d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
}
}
return nil
@ -1460,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind {
return kind
}
}
func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
for i := 0; i < typ.NumField(); i++ {
f := typ.Field(i)
if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
return true
}
if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
return true
}
}
return false
}
func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return v
}
deref := v.Elem()
derefT := deref.Type()
if isStructTypeConvertibleToMap(derefT, true, tagName) {
return deref
}
return v
}

View File

@ -2,4 +2,4 @@ module github.com/moby/sys/mountinfo
go 1.16
require golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
require golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a

View File

@ -1,2 +1,2 @@
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@ -7,6 +7,34 @@ import (
"golang.org/x/sys/unix"
)
// MountedFast is a method of detecting a mount point without reading
// mountinfo from procfs. A caller can only trust the result if no error
// and sure == true are returned. Otherwise, other methods (e.g. parsing
// /proc/mounts) have to be used. If unsure, use Mounted instead (which
// uses MountedFast, but falls back to parsing mountinfo if needed).
//
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
//
// This function is only available on Linux. When available (since kernel
// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise,
// the implementation falls back to using stat(2), which can reliably detect
// normal (but not bind) mounts.
func MountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
path, err = normalizePath(path)
if err != nil {
return false, false, err
}
mounted, sure, err = mountedFast(path)
return
}
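
MountedFast gives callers a cheap first pass before paying for a full mountinfo parse. A minimal sketch of the intended pattern on Linux; the path is only an example:

```go
package main

import (
	"fmt"

	"github.com/moby/sys/mountinfo"
)

func main() {
	path := "/sys/fs/cgroup" // illustrative mount point

	mounted, sure, err := mountinfo.MountedFast(path)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if !sure {
		// The fast paths were inconclusive (e.g. a bind mount on a pre-5.6
		// kernel); fall back to the full mountinfo-based check.
		mounted, err = mountinfo.Mounted(path)
		if err != nil {
			fmt.Println("error:", err)
			return
		}
	}
	fmt.Println(path, "mounted:", mounted)
}
```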
// mountedByOpenat2 is a method of detecting a mount that works for all kinds
// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel.
func mountedByOpenat2(path string) (bool, error) {
@ -34,24 +62,40 @@ func mountedByOpenat2(path string) (bool, error) {
return false, &os.PathError{Op: "openat2", Path: path, Err: err}
}
func mounted(path string) (bool, error) {
path, err := normalizePath(path)
if err != nil {
return false, err
// mountedFast is similar to MountedFast, except it expects a normalized path.
func mountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
// Try a fast path, using openat2() with RESOLVE_NO_XDEV.
mounted, err := mountedByOpenat2(path)
mounted, err = mountedByOpenat2(path)
if err == nil {
return mounted, nil
return mounted, true, nil
}
// Another fast path: compare st.st_dev fields.
mounted, err = mountedByStat(path)
// This does not work for bind mounts, so false negative
// is possible, therefore only trust if return is true.
if mounted && err == nil {
return true, true, nil
}
return
}
func mounted(path string) (bool, error) {
path, err := normalizePath(path)
if err != nil {
return false, err
}
mounted, sure, err := mountedFast(path)
if sure && err == nil {
return mounted, nil
}
// Fallback to parsing mountinfo
// Fallback to parsing mountinfo.
return mountedByMountinfo(path)
}

View File

@ -1,10 +1,9 @@
//go:build linux || (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo)
// +build linux freebsd,cgo openbsd,cgo darwin,cgo
//go:build linux || freebsd || openbsd || darwin
// +build linux freebsd openbsd darwin
package mountinfo
import (
"fmt"
"os"
"path/filepath"
@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) {
func normalizePath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
return "", err
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
return "", err
}
if _, err := os.Stat(realPath); err != nil {
return "", fmt.Errorf("failed to stat target of %q: %w", path, err)
return "", err
}
return realPath, nil
}

View File

@ -13,9 +13,9 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
// Mounted determines if a specified path is a mount point. In case of any
// error, false (and an error) is returned.
//
// The non-existent path returns an error. If a caller is not interested
// in this particular error, it should handle it separately using e.g.
// errors.Is(err, os.ErrNotExist).
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
func Mounted(path string) (bool, error) {
// root is always mounted
if path == string(os.PathSeparator) {

View File

@ -1,53 +1,37 @@
//go:build (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo)
// +build freebsd,cgo openbsd,cgo darwin,cgo
//go:build freebsd || openbsd || darwin
// +build freebsd openbsd darwin
package mountinfo
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
import "golang.org/x/sys/unix"
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
var rawEntries *C.struct_statfs
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
if count == 0 {
return nil, fmt.Errorf("failed to call getmntinfo")
count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
if err != nil {
return nil, err
}
var entries []C.struct_statfs
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
header.Cap = count
header.Len = count
header.Data = uintptr(unsafe.Pointer(rawEntries))
entries := make([]unix.Statfs_t, count)
_, err = unix.Getfsstat(entries, unix.MNT_WAIT)
if err != nil {
return nil, err
}
var out []*Info
for _, entry := range entries {
var mountinfo Info
var skip, stop bool
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
mountinfo := getMountinfo(&entry)
if filter != nil {
// filter out entries we're not interested in
skip, stop = filter(&mountinfo)
skip, stop = filter(mountinfo)
if skip {
continue
}
}
out = append(out, &mountinfo)
out = append(out, mountinfo)
if stop {
break
}
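
The rewrite above drops cgo and getmntinfo(3) in favour of golang.org/x/sys/unix. The key idiom is calling unix.Getfsstat twice: once with a nil buffer to learn the entry count, then with a correctly sized slice. A minimal sketch, assuming a FreeBSD or darwin target (field names match those platforms' Statfs_t):

//go:build freebsd || darwin

// Sketch: cgo-free enumeration of mounted filesystems via Getfsstat.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func listMounts() error {
	// First call with a nil buffer just returns the number of entries.
	count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
	if err != nil {
		return err
	}
	entries := make([]unix.Statfs_t, count)
	if _, err := unix.Getfsstat(entries, unix.MNT_WAIT); err != nil {
		return err
	}
	for _, e := range entries {
		fmt.Printf("%s on %s type %s\n",
			unix.ByteSliceToString(e.Mntfromname[:]),
			unix.ByteSliceToString(e.Mntonname[:]),
			unix.ByteSliceToString(e.Fstypename[:]))
	}
	return nil
}

func main() {
	if err := listMounts(); err != nil {
		fmt.Println("error:", err)
	}
}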

View File

@ -0,0 +1,14 @@
//go:build freebsd || darwin
// +build freebsd darwin
package mountinfo
import "golang.org/x/sys/unix"
func getMountinfo(entry *unix.Statfs_t) *Info {
return &Info{
Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]),
FSType: unix.ByteSliceToString(entry.Fstypename[:]),
Source: unix.ByteSliceToString(entry.Mntfromname[:]),
}
}

View File

@ -0,0 +1,11 @@
package mountinfo
import "golang.org/x/sys/unix"
func getMountinfo(entry *unix.Statfs_t) *Info {
return &Info{
Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]),
FSType: unix.ByteSliceToString(entry.F_fstypename[:]),
Source: unix.ByteSliceToString(entry.F_mntfromname[:]),
}
}

View File

@ -1,5 +1,5 @@
//go:build (!windows && !linux && !freebsd && !openbsd && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) || (darwin && !cgo)
// +build !windows,!linux,!freebsd,!openbsd,!darwin freebsd,!cgo openbsd,!cgo darwin,!cgo
//go:build !windows && !linux && !freebsd && !openbsd && !darwin
// +build !windows,!linux,!freebsd,!openbsd,!darwin
package mountinfo

View File

@ -15,7 +15,7 @@ type Spec struct {
// Mounts configures additional mounts (on top of Root).
Mounts []Mount `json:"mounts,omitempty"`
// Hooks configures callbacks for container lifecycle events.
Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"`
Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"`
// Annotations contains arbitrary metadata for the container.
Annotations map[string]string `json:"annotations,omitempty"`
@ -27,6 +27,8 @@ type Spec struct {
Windows *Windows `json:"windows,omitempty" platform:"windows"`
// VM specifies configuration for virtual-machine-based containers.
VM *VM `json:"vm,omitempty" platform:"vm"`
// ZOS is platform-specific configuration for z/OS based containers.
ZOS *ZOS `json:"zos,omitempty" platform:"zos"`
}
// Process contains information to start a specific application inside the container.
@ -49,7 +51,7 @@ type Process struct {
// Capabilities are Linux capabilities that are kept for the process.
Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"`
// Rlimits specifies rlimit options to apply to the process.
Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"`
Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"`
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"`
// ApparmorProfile specifies the apparmor profile for the container.
@ -86,11 +88,11 @@ type Box struct {
// User specifies specific user (and group) information for the container process.
type User struct {
// UID is the user id.
UID uint32 `json:"uid" platform:"linux,solaris"`
UID uint32 `json:"uid" platform:"linux,solaris,zos"`
// GID is the group id.
GID uint32 `json:"gid" platform:"linux,solaris"`
GID uint32 `json:"gid" platform:"linux,solaris,zos"`
// Umask is the umask for the init process.
Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"`
Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"`
// AdditionalGids are additional group ids set for the container's process.
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
// Username is the user name.
@ -110,7 +112,7 @@ type Mount struct {
// Destination is the absolute path where the mount will be placed in the container.
Destination string `json:"destination"`
// Type specifies the mount kind.
Type string `json:"type,omitempty" platform:"linux,solaris"`
Type string `json:"type,omitempty" platform:"linux,solaris,zos"`
// Source specifies the source path of the mount.
Source string `json:"source,omitempty"`
// Options are fstab style mount options.
@ -178,7 +180,7 @@ type Linux struct {
// MountLabel specifies the selinux context for the mounts in the container.
MountLabel string `json:"mountLabel,omitempty"`
// IntelRdt contains Intel Resource Director Technology (RDT) information for
// handling resource constraints (e.g., L3 cache, memory bandwidth) for the container
// handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
// Personality contains configuration for the Linux personality syscall
Personality *LinuxPersonality `json:"personality,omitempty"`
@ -683,8 +685,9 @@ type LinuxSyscall struct {
Args []LinuxSeccompArg `json:"args,omitempty"`
}
// LinuxIntelRdt has container runtime resource constraints for Intel RDT
// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel
// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA
// features and flags enabling Intel RDT CMT and MBM features.
// Intel RDT features are available in Linux 4.14 and newer kernel versions.
type LinuxIntelRdt struct {
// The identity for RDT Class of Service
ClosID string `json:"closID,omitempty"`
@ -697,4 +700,36 @@ type LinuxIntelRdt struct {
// The unit of memory bandwidth is specified in "percentages" by
// default, and in "MBps" if MBA Software Controller is enabled.
MemBwSchema string `json:"memBwSchema,omitempty"`
// EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of
// the last-level cache (LLC) occupancy for the container.
EnableCMT bool `json:"enableCMT,omitempty"`
// EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of
// total and local memory bandwidth for the container.
EnableMBM bool `json:"enableMBM,omitempty"`
}
// ZOS contains platform-specific configuration for z/OS based containers.
type ZOS struct {
// Devices are a list of device nodes that are created for the container
Devices []ZOSDevice `json:"devices,omitempty"`
}
// ZOSDevice represents the mknod information for a z/OS special device file
type ZOSDevice struct {
// Path to the device.
Path string `json:"path"`
// Device type, block, char, etc.
Type string `json:"type"`
// Major is the device's major number.
Major int64 `json:"major"`
// Minor is the device's minor number.
Minor int64 `json:"minor"`
// FileMode permission bits for the device.
FileMode *os.FileMode `json:"fileMode,omitempty"`
// UID of the device.
UID *uint32 `json:"uid,omitempty"`
// Gid of the device.
GID *uint32 `json:"gid,omitempty"`
}
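
To show what the new spec fields look like when populated, here is a hedged sketch that builds a Spec with the IntelRdt monitoring flags and a z/OS device and prints it as JSON. The import path is the upstream runtime-spec module assumed to be the one vendored here; the device path and ClosID values are purely illustrative.

// Sketch: exercising the new LinuxIntelRdt and ZOS fields.
package main

import (
	"encoding/json"
	"fmt"
	"os"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	mode := os.FileMode(0o600)
	uid, gid := uint32(0), uint32(0)

	spec := specs.Spec{
		Version: specs.Version,
		Linux: &specs.Linux{
			IntelRdt: &specs.LinuxIntelRdt{
				ClosID:    "guaranteed_group", // illustrative CLOS name
				EnableCMT: true,               // last-level cache occupancy monitoring
				EnableMBM: true,               // memory bandwidth monitoring
			},
		},
		ZOS: &specs.ZOS{
			Devices: []specs.ZOSDevice{{
				Path:     "/dev/fuse", // illustrative device node
				Type:     "c",
				Major:    4,
				Minor:    0,
				FileMode: &mode,
				UID:      &uid,
				GID:      &gid,
			}},
		},
	}

	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}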

View File

@ -1 +1 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).

View File

@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "runtime/debug"
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}
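
Since the constructor above is kept only for backwards compatibility, a short, hedged sketch of the replacement it points to (the collectors sub-package) may help; the registry setup below is an assumption for illustration.

// Sketch: registering the build-info collector from the collectors package.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewBuildInfoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // expect "go_build_info"
	}
}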

View File

@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}

View File

@ -133,10 +133,14 @@ func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) Write(out *dto.Metric) error {
func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return fval + float64(ival)
}
func (c *counter) Write(out *dto.Metric) error {
val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
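
The extracted get() above combines the counter's two internal words. For readers unfamiliar with why there are two, here is a hedged, standalone sketch of the split representation: integer increments go to one atomic word so Inc() never loses precision, float additions go to the other, and reads sum them. Type and method names are illustrative.

// Sketch: split int/float counter storage with an atomic read path.
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type splitCounter struct {
	valBits uint64 // float64 bits, updated by Add
	valInt  uint64 // integer increments, updated by Inc
}

func (c *splitCounter) Inc() { atomic.AddUint64(&c.valInt, 1) }

func (c *splitCounter) Add(v float64) {
	for {
		oldBits := atomic.LoadUint64(&c.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
			return
		}
	}
}

func (c *splitCounter) get() float64 {
	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
	ival := atomic.LoadUint64(&c.valInt)
	return fval + float64(ival)
}

func main() {
	var c splitCounter
	c.Inc()
	c.Add(0.5)
	fmt.Println(c.get()) // 1.5
}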

View File

@ -16,32 +16,209 @@ package prometheus
import (
"runtime"
"runtime/debug"
"sync"
"time"
)
type goCollector struct {
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
}
}
type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
gcLastTimeDesc *Desc
goInfoDesc *Desc
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
return &goCollector{
func newBaseGoCollector() baseGoCollector {
return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@ -54,243 +231,28 @@ func NewGoCollector() Collector {
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
},
}
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@ -305,63 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// memStatsMetrics provide description, value, and value type for memstat metrics.
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}
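
The refactor above moves the per-field memstats table into goRuntimeMemStats() while baseGoCollector keeps only the goroutine, thread, GC and go_info metrics. A minimal, hedged illustration of that desc/eval/valType table pattern follows; the two entries and the standalone program structure are assumptions for demonstration, not the vendored collector.

// Sketch: a table of Desc + evaluator + value type driving const metrics.
package main

import (
	"fmt"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

type memStatMetric struct {
	desc    *prometheus.Desc
	eval    func(*runtime.MemStats) float64
	valType prometheus.ValueType
}

func main() {
	metrics := []memStatMetric{
		{
			desc: prometheus.NewDesc("go_memstats_alloc_bytes",
				"Number of bytes allocated and still in use.", nil, nil),
			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
			valType: prometheus.GaugeValue,
		},
		{
			desc: prometheus.NewDesc("go_memstats_heap_objects",
				"Number of allocated objects.", nil, nil),
			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
			valType: prometheus.GaugeValue,
		},
	}

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	for _, m := range metrics {
		metric := prometheus.MustNewConstMetric(m.desc, m.valType, m.eval(&ms))
		fmt.Println(metric.Desc())
	}
}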

Some files were not shown because too many files have changed in this diff.